diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 000000000000..18b43dea030c --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,384 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +image: khos2ow/ci-cd-tools:latest + +# Define CI stages +stages: + - test + - archive + - integration + - deploy + +# Global Variables +variables: + GIT_DEPTH: "40" + MAVEN_OPTS: '-Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=INFO + -Dorg.slf4j.simpleLogger.showDateTime=true + -Djava.awt.headless=true + -Dmaven.repo.local=/root/.m2/repository' + +.load_rpm_swift_function: &LOAD_REPO_SWIFT | + function load_swift() { + export OS_USERNAME=cloudops-pdion + export OS_TENANT_NAME=cloudops-jenkins-swift + export OS_PASSWORD=Vr8ySJw8PosU6s1V + export OS_AUTH_URL=https://auth-qc.cloud.ca/v2.0 + export OS_REGION_NAME=east + } + +.load_template_swift_function: &LOAD_TEMPLATE_SWIFT | + function load_swift() { + export OS_USERNAME=cloudops-pdion + export OS_TENANT_NAME=cloudops-jenkins + export OS_PASSWORD=Vr8ySJw8PosU6s1V + export OS_AUTH_URL=https://auth-qc.cloud.ca/v2.0 + export OS_REGION_NAME=east + } + +.yum_repo_path_function: &YUM_REPO_PATH | + function yum_repo_path() { + if [ "${CI_PROJECT_NAMESPACE}" = "dev" ]; then + if [ -n "${CI_COMMIT_TAG}" ]; then + local version=$(cd ${CI_PROJECT_DIR}; mvn -q -Dexec.executable="echo" -Dexec.args='${project.version}' --non-recursive org.codehaus.mojo:exec-maven-plugin:1.3.1:exec) + local major_version=`echo ${version} | cut -d. -f1`.`echo ${version} | cut -d. -f2` + local os_target="centos7" + + case "$version" in + *"-SNAPSHOT") local stable_prefix="unstable" ;; + *) local stable_prefix="stable" ;; + esac + + echo "${stable_prefix}/${major_version}/${os_target}" + elif [ "${CI_COMMIT_REF_NAME}" = "cca_4.12" -o "${CI_COMMIT_REF_NAME}" = "cca_4.11" -o "${CI_COMMIT_REF_NAME}" = "cca_4.10" ]; then + local version=$(cd ${CI_PROJECT_DIR}; mvn -q -Dexec.executable="echo" -Dexec.args='${project.version}' --non-recursive org.codehaus.mojo:exec-maven-plugin:1.3.1:exec) + local major_version=`echo ${version} | cut -d. -f1`.`echo ${version} | cut -d. -f2` + local os_target="centos7" + + case "$version" in + *"-SNAPSHOT") local stable_prefix="unstable" ;; + *) local stable_prefix="stable" ;; + esac + + echo "${stable_prefix}/${major_version}/${os_target}" + else + echo "dev/${CI_COMMIT_REF_NAME}" + fi + else + echo "dev/${CI_COMMIT_REF_NAME}" + fi + } + +.template_path_function: &TEMPLATE_PATH | + function template_path() { + if [ "${CI_PROJECT_NAMESPACE}" = "dev" ]; then + if [ "${CI_COMMIT_REF_NAME}" = "cca_4.12" -o "${CI_COMMIT_REF_NAME}" = "cca_4.11" -o "${CI_COMMIT_REF_NAME}" = "cca_4.10" ]; then + local version=$(cd ${CI_PROJECT_DIR}; mvn -q -Dexec.executable="echo" -Dexec.args='${project.version}' --non-recursive org.codehaus.mojo:exec-maven-plugin:1.3.1:exec) + local major_version=`echo ${version} | cut -d. -f1`.`echo ${version} | cut -d. -f2` + + case "$version" in + *"-SNAPSHOT") local stable_prefix="unstable" ;; + *) local stable_prefix="stable" ;; + esac + + echo "${stable_prefix}/${major_version}" + else + echo "dev/${CI_COMMIT_REF_NAME}" + fi + else + echo "dev/${CI_COMMIT_REF_NAME}" + fi + } + +# RAT checks +Audit: + image: + name: khos2ow/cloudstack-rpm-builder:centos7 + entrypoint: ["/bin/bash", "-l", "-c"] + stage: test + before_script: + - environment-info.sh + script: + - mvn --activate-profiles developer,systemvm -Dsimulator --projects='org.apache.cloudstack:cloudstack' clean org.apache.rat:apache-rat-plugin:0.12:check + artifacts: + name: "audit_report_${CI_BUILD_REF_SLUG}" + paths: + - "target/rat.txt" + when: on_failure + expire_in: 1 day + only: + - branches + tags: + - java + +# Archive RPMs to object storage +RPM: + image: + name: khos2ow/cloudstack-rpm-builder:centos7 + entrypoint: ["/bin/bash", "-l", "-c"] + stage: archive + before_script: + - environment-info.sh + - pip3 install python-swiftclient + - pip3 install python-keystoneclient + - *LOAD_REPO_SWIFT + - *YUM_REPO_PATH + script: + # worksaround for SSLHandshakeException issue, https://github.com/apache/cloudstack/issues/2682#issuecomment-392973706 + - cat ${CI_PROJECT_DIR}/client/conf/java.security.ciphers.in >> /usr/lib/jvm/java-1.8.0-openjdk/jre/lib/security/java.security + + # download required vhd-util file, if it doesn't exist + - wget http://download.cloudstack.org/tools/vhd-util --directory-prefix=${CI_PROJECT_DIR}/scripts/vm/hypervisor/xenserver + + # do the packaging and creating RPMs + - /usr/local/bin/docker-entrypoint.sh --workspace-path ${CI_PROJECT_DIR} --distribution centos7 --use-timestamp + + # upload RPMs to swift object storage + - | + load_swift + + swift_path=`yum_repo_path` + namespace="cloudstack" + + # upload to swift + swift post ${namespace} -r '.r:*,.rlistings' + swift upload ${namespace} --object-name ${swift_path} ${CI_PROJECT_DIR}/dist/rpmbuild/RPMS + + mkdir -p ${CI_PROJECT_DIR}/target/rpms + + # fix repo metadata + if [ -n "${swift_path}" ]; then + swift download ${namespace} --prefix ${swift_path} --output-dir=${CI_PROJECT_DIR}/target/rpms + createrepo --update ${CI_PROJECT_DIR}/target/rpms/${swift_path} + swift delete ${namespace} --prefix ${swift_path}/repodata + swift upload ${namespace} --object-name ${swift_path}/repodata/ ${CI_PROJECT_DIR}/target/rpms/${swift_path}/repodata/ + fi + artifacts: + name: "test_report_${CI_BUILD_REF_SLUG}" + paths: + - "*/target/surefire-reports" + - "*/*/target/surefire-reports" + - "*/*/*/target/surefire-reports" + - "*/*/*/*/target/surefire-reports" + + - "*/target/checkstyle-result.xml" + - "*/*/target/checkstyle-result.xml" + - "*/*/*/target/checkstyle-result.xml" + - "*/*/*/*/target/checkstyle-result.xml" + when: on_failure + expire_in: 1 day + only: + - branches + tags: + - java + +# Archive SystemVM Template to object storage +SysVM Template: + stage: archive + when: manual + variables: + MAVEN_OPTS: '-Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=INFO + -Dorg.slf4j.simpleLogger.showDateTime=true + -Djava.awt.headless=true + -Dmaven.repo.local=~/maven-repo/repository' + before_script: + - *LOAD_TEMPLATE_SWIFT + - *TEMPLATE_PATH + script: + - | + export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64 + + # get the version before going any further + VERSION=$(mvn -q -Dexec.executable="echo" -Dexec.args='${project.version}' --non-recursive org.codehaus.mojo:exec-maven-plugin:1.3.1:exec) + TIMESTAMP=$(date +%s) + + # clear out BUILD_NUMBER to not be shown in system vm template file name + BUILD_NUMBER="" + MAJOR_VERSION=`echo ${VERSION} | cut -d. -f1`.`echo ${VERSION} | cut -d. -f2` + + cd ${CI_PROJECT_DIR}/tools/appliance + + build_appliance="systemvmtemplate" + build_version=$(echo $VERSION | sed 's/\-SNAPSHOT/\-'${TIMESTAMP}'/g') + build_branch= + + chmod +x build.sh + ./build.sh "$build_appliance" "$build_version" "$build_branch" + + # upload templates to swift object storage + - | + load_swift + + swift_path=`template_path` + namespace="systemvm" + + # move file to end destination + mkdir -p ${CI_PROJECT_DIR}/target/templates + mv ${CI_PROJECT_DIR}/tools/appliance/dist/*-xen.vhd.bz2 ${CI_PROJECT_DIR}/target/templates + mv ${CI_PROJECT_DIR}/tools/appliance/dist/*-kvm.qcow2.bz2 ${CI_PROJECT_DIR}/target/templates + + # create md5sum file + full_name=`basename ${CI_PROJECT_DIR}/target/templates/*.qcow2.bz2 | sed 's/\-kvm\.qcow2\.bz2//g'` + partial_name=$(echo ${full_name} | sed 's/systemvmtemplate\-//g' | sed 's/systemvm64template\-//g') + md5sum=$(cd ${CI_PROJECT_DIR}/target/templates; md5sum * > ${full_name}.md5sum; cat ${full_name}.md5sum) + + # upload to swift + swift post ${namespace} -r '.r:*,.rlistings' + swift upload ${namespace} --object-name ${swift_path} ${CI_PROJECT_DIR}/target/templates | xargs -n1 swift stat -v ${namespace} | grep URL | sort | sed 's/URL\://g' | tr -d ' ' + after_script: + - git clean -fdx + only: + - branches + tags: + - systemvm + +# Run integration tests against live build +Integration Test: + stage: integration + when: manual + before_script: + - environment-info.sh + script: + - echo "TODO" + only: + - branches + except: + - master + - cca_4.10 + - cca_4.11 + - cca_4.12 + +.deploy: &DEPLOY + stage: deploy + when: manual + before_script: + - environment-info.sh + - *YUM_REPO_PATH + script: + - | + mkdir -p ~/.ssh + + echo "${SSH_PRIVATE_KEY}" > ~/.ssh/id_rsa + chmod 700 ~/.ssh/id_rsa + + ssh-keyscan ${LAB_ENV_IP} >> ~/.ssh/known_hosts + chmod 644 ~/.ssh/known_hosts + + export REPO_BASE="https://objects-east.cloud.ca/v1/a8286006ae394ede8bc081f586ae048d/cloudstack/" + export REPO_PATH="`yum_repo_path`" + + ssh -T ${SSH_USER}@${LAB_ENV_IP} << EOF + sudo su - + + set -e + + # stop services + echo -e "stopping chef-client...\n" + systemctl stop chef-client + + echo -e "stopping cloudstack-usage...\n" + systemctl stop cloudstack-usage + + echo -e "stopping cloudstack-management...\n" + systemctl stop cloudstack-management + + # update yum repo + echo -e "updating /etc/yum.repos.d/cloudstack.repo...\n" + + sed -i "s|^baseurl=${REPO_BASE}.*|baseurl=${REPO_BASE}${REPO_PATH}|gI" /etc/yum.repos.d/cloudstack.repo + sed -i "s/^enabled=0/enabled=1/gI" /etc/yum.repos.d/cloudstack.repo + + cat /etc/yum.repos.d/cloudstack.repo + echo "" + + # upgrade cloudstack + echo -e "updating cloudstack rpm...\n" + yum clean metadata + yum upgrade --assumeyes cloudstack-* + + # start services + echo -e "starting cloudstack-management...\n" + systemctl start cloudstack-management + + echo -e "starting cloudstack-usage...\n" + systemctl start cloudstack-usage + + echo -e "starting chef-client...\n" + systemctl start chef-client + EOF + environment: + name: ${LAB_ENV_NAME} + +ccd-r1-acs1-acs01: + <<: *DEPLOY + variables: + LAB_ENV_NAME: "ccd-r1-acs1-acs01" + LAB_ENV_IP: "10.218.184.158" + +ccd-r1-acs2-acs01: + <<: *DEPLOY + variables: + LAB_ENV_NAME: "ccd-r1-acs2-acs01" + LAB_ENV_IP: "10.218.184.40" + +ccd-r1-bm1-acs01: + <<: *DEPLOY + variables: + LAB_ENV_NAME: "ccd-r1-bm1-acs01" + LAB_ENV_IP: "10.218.184.74" + +ccd-r1-dev1-acs01: + <<: *DEPLOY + variables: + LAB_ENV_NAME: "ccd-r1-dev1-acs01" + LAB_ENV_IP: "10.218.184.249" + +ccd-r1-dev2-acs01: + <<: *DEPLOY + variables: + LAB_ENV_NAME: "ccd-r1-dev2-acs01" + LAB_ENV_IP: "10.218.184.116" + +ccd-r1-stg-acs01: + <<: *DEPLOY + variables: + LAB_ENV_NAME: "ccd-r1-stg-acs01" + LAB_ENV_IP: "10.218.184.166" + +ccd-r1-stg-acs02: + <<: *DEPLOY + variables: + LAB_ENV_NAME: "ccd-r1-stg-acs02" + LAB_ENV_IP: "10.218.184.207" + +cca-r1-beta02-mtg02: + <<: *DEPLOY + variables: + LAB_ENV_NAME: "cca-r1-beta02-mtg02" + LAB_ENV_IP: "172.27.2.119" + +ccd-r1-acs3-acs01: + <<: *DEPLOY + variables: + LAB_ENV_NAME: "ccd-r1-acs3-acs01" + LAB_ENV_IP: "10.218.184.156" + +ccd-r1-acs4: + <<: *DEPLOY + variables: + LAB_ENV_NAME: "ccd-r1-acs4-acs01" + LAB_ENV_IP: "10.218.184.142" diff --git a/api/pom.xml b/api/pom.xml index 9e4b646eec67..b1879ead164e 100644 --- a/api/pom.xml +++ b/api/pom.xml @@ -66,6 +66,10 @@ cloud-framework-direct-download ${project.version} + + com.bettercloud + vault-java-driver + diff --git a/api/src/main/java/com/cloud/agent/api/to/DataStoreTO.java b/api/src/main/java/com/cloud/agent/api/to/DataStoreTO.java index 1c901a647cbc..7be8d3770aae 100644 --- a/api/src/main/java/com/cloud/agent/api/to/DataStoreTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/DataStoreTO.java @@ -28,4 +28,8 @@ public interface DataStoreTO { String getUrl(); String getPathSeparator(); + + default boolean isPartialBackupCapable() { + return true; + } } diff --git a/api/src/main/java/com/cloud/agent/api/to/DataTO.java b/api/src/main/java/com/cloud/agent/api/to/DataTO.java index af43aa102e84..94fc89da004e 100644 --- a/api/src/main/java/com/cloud/agent/api/to/DataTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/DataTO.java @@ -32,5 +32,7 @@ public interface DataTO { */ String getPath(); + String getName(); + long getId(); } diff --git a/api/src/main/java/com/cloud/agent/api/to/S3TO.java b/api/src/main/java/com/cloud/agent/api/to/S3TO.java index 233238cf793d..e3c3d40c06ce 100644 --- a/api/src/main/java/com/cloud/agent/api/to/S3TO.java +++ b/api/src/main/java/com/cloud/agent/api/to/S3TO.java @@ -239,6 +239,11 @@ public String getPathSeparator() { return pathSeparator; } + @Override + public boolean isPartialBackupCapable() { + return false; + } + @Override public boolean equals(final Object thatObject) { diff --git a/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java b/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java index 8f58c9e1c917..ec33b9763827 100644 --- a/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java @@ -20,6 +20,7 @@ import com.cloud.storage.StoragePool; public class StorageFilerTO { + boolean isManaged; long id; String uuid; String host; @@ -36,6 +37,7 @@ public StorageFilerTO(StoragePool pool) { this.type = pool.getPoolType(); this.uuid = pool.getUuid(); this.userInfo = pool.getUserInfo(); + this.isManaged = pool.isManaged(); } public long getId() { @@ -66,6 +68,10 @@ public StoragePoolType getType() { return type; } + public boolean isManaged(){ + return isManaged; + } + protected StorageFilerTO() { } diff --git a/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java b/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java index b89dfea40e0c..e4aa8f276212 100644 --- a/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java @@ -89,4 +89,9 @@ public String getUuid() { public String getPathSeparator() { return pathSeparator; } + + @Override + public boolean isPartialBackupCapable() { + return false; + } } diff --git a/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java b/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java index 5fc248343ecc..0f263de1a9f0 100644 --- a/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java @@ -16,11 +16,12 @@ // under the License. package com.cloud.agent.api.to; +import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.HashMap; import com.cloud.network.element.NetworkElement; +import com.cloud.storage.Storage; import com.cloud.template.VirtualMachineTemplate.BootloaderType; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.Type; @@ -82,6 +83,8 @@ public class VirtualMachineTO { Map extraConfig = new HashMap<>(); DeployAsIsInfoTO deployAsIsInfo; + Storage.ImageFormat format; + public VirtualMachineTO(long id, String instanceName, VirtualMachine.Type type, int cpus, Integer speed, long minRam, long maxRam, BootloaderType bootloader, String os, boolean enableHA, boolean limitCpuUse, String vncPassword) { this.id = id; @@ -422,4 +425,12 @@ public void setDeployAsIsInfo(DeployAsIsInfoTO deployAsIsInfo) { public String toString() { return String.format("VM {id: \"%s\", name: \"%s\", uuid: \"%s\", type: \"%s\"}", id, name, uuid, type); } + + public Storage.ImageFormat getFormat() { + return format; + } + + public void setFormat(Storage.ImageFormat format) { + this.format = format; + } } diff --git a/api/src/main/java/com/cloud/event/EventTypes.java b/api/src/main/java/com/cloud/event/EventTypes.java index 289276fe663e..a8b5fbb371d7 100644 --- a/api/src/main/java/com/cloud/event/EventTypes.java +++ b/api/src/main/java/com/cloud/event/EventTypes.java @@ -501,6 +501,9 @@ public class EventTypes { public static final String EVENT_VPC_OFFERING_UPDATE = "VPC.OFFERING.UPDATE"; public static final String EVENT_VPC_OFFERING_DELETE = "VPC.OFFERING.DELETE"; + // VPC source NAT + public static final String EVENT_VPC_SOURCE_NAT_UPDATE = "VPC.SOURCE.NAT.UPDATE"; + // Private gateway public static final String EVENT_PRIVATE_GATEWAY_CREATE = "PRIVATE.GATEWAY.CREATE"; public static final String EVENT_PRIVATE_GATEWAY_DELETE = "PRIVATE.GATEWAY.DELETE"; diff --git a/api/src/main/java/com/cloud/exception/RemoteAccessVpnException.java b/api/src/main/java/com/cloud/exception/RemoteAccessVpnException.java new file mode 100644 index 000000000000..93a34d8ce3a8 --- /dev/null +++ b/api/src/main/java/com/cloud/exception/RemoteAccessVpnException.java @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.exception; + +/** + * @since 4.10.0.228-cca + */ +public class RemoteAccessVpnException extends ManagementServerException { + private static final long serialVersionUID = -5851224796385227880L; + + public RemoteAccessVpnException(String message) { + super(message); + } +} diff --git a/api/src/main/java/com/cloud/gpu/GPU.java b/api/src/main/java/com/cloud/gpu/GPU.java index 8aa54c0c0f87..e36d62ddbf92 100644 --- a/api/src/main/java/com/cloud/gpu/GPU.java +++ b/api/src/main/java/com/cloud/gpu/GPU.java @@ -32,6 +32,34 @@ public enum GPUType { GRID_K220Q("GRID K220Q"), GRID_K240Q("GRID K240Q"), GRID_K260("GRID K260Q"), + GRID_V100D_32A("GRID V100D-32A"), + GRID_V100D_8Q("GRID V100D-8Q"), + GRID_V100D_4A("GRID V100D-4A"), + GRID_V100D_1B("GRID V100D-1B"), + GRID_V100D_2Q("GRID V100D-2Q"), + GRID_V100D_4Q("GRID V100D-4Q"), + GRID_V100D_2A("GRID V100D-2A"), + GRID_V100D_2B("GRID V100D-2B"), + GRID_V100D_32Q("GRID V100D-32Q"), + GRID_V100D_16A("GRID V100D-16A"), + GRID_V100D_1Q("GRID V100D-1Q"), + GRID_V100D_2B4("GRID V100D-2B4"), + GRID_V100D_16Q("GRID V100D-16Q"), + GRID_V100D_8A("GRID V100D-8A"), + GRID_V100D_1A("GRID V100D-1A"), + GRID_T4_16A("GRID T4-16A"), + GRID_T4_2B4("GRID T4-2B4"), + GRID_T4_4Q("GRID T4-4Q"), + GRID_T4_16Q("GRID T4-16Q"), + GRID_T4_4A("GRID T4-4A"), + GRID_T4_1A("GRID T4-1A"), + GRID_T4_2Q("GRID T4-2Q"), + GRID_T4_2B("GRID T4-2B"), + GRID_T4_8Q("GRID T4-8Q"), + GRID_T4_2A("GRID T4-2A"), + GRID_T4_1B("GRID T4-1B"), + GRID_T4_1Q("GRID T4-1Q"), + GRID_T4_8A("GRID T4-8A"), passthrough("passthrough"); private String type; diff --git a/api/src/main/java/com/cloud/network/RemoteAccessVpn.java b/api/src/main/java/com/cloud/network/RemoteAccessVpn.java index 25b4fbbcdeba..52257e58bbea 100644 --- a/api/src/main/java/com/cloud/network/RemoteAccessVpn.java +++ b/api/src/main/java/com/cloud/network/RemoteAccessVpn.java @@ -32,6 +32,8 @@ enum State { String getIpsecPresharedKey(); + String getCaCertificate(); + String getLocalIp(); Long getNetworkId(); @@ -42,4 +44,6 @@ enum State { @Override boolean isDisplay(); + + String getVpnType(); } diff --git a/api/src/main/java/com/cloud/network/vpc/Vpc.java b/api/src/main/java/com/cloud/network/vpc/Vpc.java index 432c8839ad89..985046cb37fe 100644 --- a/api/src/main/java/com/cloud/network/vpc/Vpc.java +++ b/api/src/main/java/com/cloud/network/vpc/Vpc.java @@ -16,12 +16,12 @@ // under the License. package com.cloud.network.vpc; +import java.util.Date; + import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; -import java.util.Date; - public interface Vpc extends ControlledEntity, Identity, InternalIdentity { public enum State { @@ -56,6 +56,12 @@ public enum State { */ long getVpcOfferingId(); + /** + * + * @return Network boot ip + */ + String getNetworkBootIp(); + /** * * @return VPC display text @@ -95,4 +101,6 @@ public enum State { void setRollingRestart(boolean rollingRestart); Date getCreated(); + + void setNetworkBootIp(String networkBootIp); } diff --git a/api/src/main/java/com/cloud/network/vpc/VpcService.java b/api/src/main/java/com/cloud/network/vpc/VpcService.java index 088239708f19..45d19cd2c9d9 100644 --- a/api/src/main/java/com/cloud/network/vpc/VpcService.java +++ b/api/src/main/java/com/cloud/network/vpc/VpcService.java @@ -50,7 +50,7 @@ public interface VpcService { * @return * @throws ResourceAllocationException TODO */ - public Vpc createVpc(long zoneId, long vpcOffId, long vpcOwnerId, String vpcName, String displayText, String cidr, String networkDomain, Boolean displayVpc) + public Vpc createVpc(long zoneId, long vpcOffId, long vpcOwnerId, String vpcName, String displayText, String cidr, String networkDomain, Boolean displayVpc, String networkBootIp) throws ResourceAllocationException; /** @@ -74,7 +74,7 @@ public Vpc createVpc(long zoneId, long vpcOffId, long vpcOwnerId, String vpcName * @param displayVpc TODO * @return */ - public Vpc updateVpc(long vpcId, String vpcName, String displayText, String customId, Boolean displayVpc); + public Vpc updateVpc(long vpcId, String vpcName, String displayText, String customId, Boolean displayVpc, String networkBootip); /** * Lists VPC(s) based on the parameters passed to the method call @@ -97,12 +97,12 @@ public Vpc createVpc(long zoneId, long vpcOffId, long vpcOwnerId, String vpcName * @param tags TODO * @param projectId TODO * @param display TODO - * @param vpc + * @param networkBootIp * @return */ public Pair, Integer> listVpcs(Long id, String vpcName, String displayText, List supportedServicesStr, String cidr, Long vpcOffId, String state, String accountName, Long domainId, String keyword, Long startIndex, Long pageSizeVal, Long zoneId, Boolean isRecursive, Boolean listAll, Boolean restartRequired, - Map tags, Long projectId, Boolean display); + Map tags, Long projectId, Boolean display, String networkBootIp); /** * Starts VPC which includes starting VPC provider and applying all the neworking rules on the backend @@ -132,12 +132,14 @@ public Pair, Integer> listVpcs(Long id, String vpcName, Stri * @param id * @param cleanUp * @param makeredundant + * @param migrateVpn * @return * @throws InsufficientCapacityException */ + boolean restartVpc(RestartVPCCmd cmd) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException; - boolean restartVpc(Long networkId, boolean cleanup, boolean makeRedundant, boolean livePatch, User user) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException; + boolean restartVpc(Long networkId, boolean cleanup, boolean makeRedundant, boolean livePatch, User user, boolean migrateVpn) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException; /** * Returns a Private gateway found in the VPC by id @@ -260,4 +262,11 @@ IpAddress associateIPToVpc(long ipId, long vpcId) throws ResourceAllocationExcep */ public boolean applyStaticRoute(long routeId) throws ResourceUnavailableException; + /** + * + * @param vpcId + * @return + * @throws InsufficientAddressCapacityException + */ + boolean updateVpcSourceNAT(final long vpcId) throws InsufficientCapacityException, ResourceUnavailableException; } diff --git a/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java b/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java index bbb9771d27aa..5152b348c056 100644 --- a/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java +++ b/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java @@ -22,6 +22,7 @@ import org.apache.cloudstack.api.command.user.vpn.ListVpnUsersCmd; import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.RemoteAccessVpnException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.RemoteAccessVpn; import com.cloud.network.VpnUser; @@ -29,9 +30,15 @@ import com.cloud.utils.Pair; public interface RemoteAccessVpnService { - static final String RemoteAccessVpnClientIpRangeCK = "remote.access.vpn.client.iprange"; + enum Type { + L2TP, IKEV2 + } - RemoteAccessVpn createRemoteAccessVpn(long vpnServerAddressId, String ipRange, boolean openFirewall, Boolean forDisplay) throws NetworkRuleConflictException; + String RemoteAccessVpnTypeConfigKey = "remote.access.vpn.type"; + String RemoteAccessVpnClientIpRangeCK = "remote.access.vpn.client.iprange"; + + RemoteAccessVpn createRemoteAccessVpn(long vpnServerAddressId, String ipRange, boolean openFirewall, Boolean forDisplay) + throws NetworkRuleConflictException, RemoteAccessVpnException; boolean destroyRemoteAccessVpnForIp(long ipId, Account caller, boolean forceCleanup) throws ResourceUnavailableException; @@ -59,4 +66,6 @@ public interface RemoteAccessVpnService { RemoteAccessVpn updateRemoteAccessVpn(long id, String customId, Boolean forDisplay); + boolean migrateRemoteAccessVpn(long accountId, long vpcId); + } diff --git a/api/src/main/java/com/cloud/offering/DiskOffering.java b/api/src/main/java/com/cloud/offering/DiskOffering.java index 8f2a0c9f761c..a1b8a133e627 100644 --- a/api/src/main/java/com/cloud/offering/DiskOffering.java +++ b/api/src/main/java/com/cloud/offering/DiskOffering.java @@ -16,13 +16,12 @@ // under the License. package com.cloud.offering; -import java.util.Date; - +import com.cloud.storage.Storage.ProvisioningType; import org.apache.cloudstack.acl.InfrastructureEntity; import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; -import com.cloud.storage.Storage.ProvisioningType; +import java.util.Date; /** * Represents a disk offering that specifies what the end user needs in @@ -138,6 +137,22 @@ public String toString() { Long getIopsWriteRateMaxLength(); + Long getMinIopsPerGb(); + + void setMinIopsPerGb(Long minIopsPerGB); + + Long getMaxIopsPerGb(); + + void setMaxIopsPerGb(Long maxIopsPerGB); + + Long getHighestMinIops(); + + void setHighestMinIops(Long highestMinIops); + + Long getHighestMaxIops(); + + void setHighestMaxIops(Long highestMaxIops); + void setHypervisorSnapshotReserve(Integer hypervisorSnapshotReserve); Integer getHypervisorSnapshotReserve(); diff --git a/api/src/main/java/com/cloud/storage/Storage.java b/api/src/main/java/com/cloud/storage/Storage.java index 300944559d62..fd9b9f737550 100644 --- a/api/src/main/java/com/cloud/storage/Storage.java +++ b/api/src/main/java/com/cloud/storage/Storage.java @@ -34,7 +34,8 @@ public static enum ImageFormat { VDI(true, true, false, "vdi"), TAR(false, false, false, "tar"), ZIP(false, false, false, "zip"), - DIR(false, false, false, "dir"); + DIR(false, false, false, "dir"), + PXEBOOT(false, false, false, "PXEBOOT"); private final boolean supportThinProvisioning; private final boolean supportSparse; diff --git a/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java b/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java index 95d1ebf0b87a..d7b3d6428ea5 100644 --- a/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java +++ b/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java @@ -97,6 +97,8 @@ public enum TemplateFilter { boolean isRequiresHvm(); + String getBootFilename(); + String getDisplayText(); boolean isEnablePassword(); diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 55002f70b1b2..3b8b0b2205fd 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -45,6 +45,7 @@ public class ApiConstants { public static final String BASE64_IMAGE = "base64image"; public static final String BITS = "bits"; public static final String BOOTABLE = "bootable"; + public static final String BOOT_FILENAME = "bootfilename"; public static final String BIND_DN = "binddn"; public static final String BIND_PASSWORD = "bindpass"; public static final String BYTES_READ_RATE = "bytesreadrate"; @@ -668,6 +669,7 @@ public class ApiConstants { public static final String REGION_ID = "regionid"; public static final String VPC_OFF_ID = "vpcofferingid"; public static final String VPC_OFF_NAME = "vpcofferingname"; + public static final String NETWORK_BOOT_IP = "networkbootip"; public static final String NETWORK = "network"; public static final String VPC_ID = "vpcid"; public static final String VPC_NAME = "vpcname"; @@ -679,6 +681,7 @@ public class ApiConstants { public static final String S2S_VPN_GATEWAY_ID = "s2svpngatewayid"; public static final String S2S_CUSTOMER_GATEWAY_ID = "s2scustomergatewayid"; public static final String IPSEC_PSK = "ipsecpsk"; + public static final String MIGRATE_VPN = "migratevpn"; public static final String GUEST_IP = "guestip"; public static final String REMOVED = "removed"; public static final String COMPLETED = "completed"; @@ -742,6 +745,19 @@ public class ApiConstants { public static final String AUTOSCALE_USER_ID = "autoscaleuserid"; public static final String BAREMETAL_DISCOVER_NAME = "baremetaldiscovername"; public static final String BAREMETAL_RCT_URL = "baremetalrcturl"; + public static final String BAREMETAL_MAAS = "baremetalmaas"; + public static final String BAREMETAL_MAAS_ACTION = "baremetalmaasaction"; + public static final String BAREMETAL_MAAS_ACTION_CREATE = "baremetalmaascreate"; + public static final String BAREMETAL_MAAS_ACTION_IMPORT = "baremetalmaasimport"; + public static final String BAREMETAL_MAAS_HOST = "baremetalmaashost"; + public static final String BAREMETAL_MAAS_KEY = "baremetalmaaskey"; + public static final String BAREMETAL_MAAS_POOL = "baremetalmaaspool"; + public static final String BAREMETAL_MAAS_NODE_ID = "baremetalmaasnodeid"; + public static final String BAREMETAL_MAAS_OFFERING_ID = "offeringid"; + public static final String BAREMETAL_MAAS_OFFERING_NAME = "offeringname"; + public static final String BAREMETAL_MAAS_AVIALBALE_COUNT = "available"; + public static final String BAREMETAL_MAAS_TOTAL_COUNT = "total"; + public static final String BAREMETAL_MAAS_ERASING_COUNT = "erasing"; public static final String UCS_DN = "ucsdn"; public static final String GSLB_PROVIDER = "gslbprovider"; public static final String EXCLUSIVE_GSLB_PROVIDER = "isexclusivegslbprovider"; @@ -835,6 +851,10 @@ public class ApiConstants { public static final String NETSCALER_CONTROLCENTER_ID = "netscalercontrolcenterid"; public static final String NETSCALER_SERVICEPACKAGE_ID = "netscalerservicepackageid"; public static final String FETCH_ROUTER_HEALTH_CHECK_RESULTS = "fetchhealthcheckresults"; + public static final String MIN_IOPS_PER_GB = "miniopspergb"; + public static final String MAX_IOPS_PER_GB = "maxiopspergb"; + public static final String HIGHEST_MIN_IOPS = "highestminiops"; + public static final String HIGHEST_MAX_IOPS = "highestmaxiops"; public static final String ZONE_ID_LIST = "zoneids"; public static final String DESTINATION_ZONE_ID_LIST = "destzoneids"; diff --git a/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java index 08f390f19724..85394a24bd25 100644 --- a/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java @@ -79,6 +79,9 @@ public abstract class BaseUpdateTemplateOrIsoCmd extends BaseCmd { description = "optional boolean field, which indicates if details should be cleaned up or not (if set to true, details removed for this resource, details field ignored; if false or not set, no action)") private Boolean cleanupDetails; + @Parameter(name = ApiConstants.BOOT_FILENAME, type = CommandType.STRING, description = "PXE boot filename on the TFTP server.") + private String bootFilename; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -143,4 +146,6 @@ public Map getDetails() { public boolean isCleanupDetails(){ return cleanupDetails == null ? false : cleanupDetails.booleanValue(); } + + public String getBootFilename() { return bootFilename; } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java index e258d72ca381..782088df3b60 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java @@ -111,10 +111,22 @@ public class AddClusterCmd extends BaseCmd { @Parameter(name = ApiConstants.OVM3_POOL, type = CommandType.STRING, required = false, description = "Ovm3 native pooling enabled for cluster") private String ovm3pool; + @Parameter(name = ApiConstants.OVM3_CLUSTER, type = CommandType.STRING, required = false, description = "Ovm3 native OCFS2 clustering enabled for cluster") private String ovm3cluster; + @Parameter(name = ApiConstants.OVM3_VIP, type = CommandType.STRING, required = false, description = "Ovm3 vip to use for pool (and cluster)") private String ovm3vip; + + @Parameter(name = ApiConstants.BAREMETAL_MAAS_HOST, type = CommandType.STRING, required = false, description = "The hostname or IP address of the MaaS server") + private String baremetalMaasHost; + + @Parameter(name = ApiConstants.BAREMETAL_MAAS_KEY, type = CommandType.STRING, required = false, description = "Administrator API key to access MaaS server") + private String baremetalMaasKey; + + @Parameter(name = ApiConstants.BAREMETAL_MAAS_POOL, type = CommandType.STRING, required = false, description = "Pool name in MaaS server to correspond this cluster with") + private String baremetalMaasPool; + public String getOvm3Pool() { return ovm3pool; } @@ -207,6 +219,18 @@ public void setAllocationState(String allocationState) { this.allocationState = allocationState; } + public String getBaremetalMaasHost() { + return baremetalMaasHost; + } + + public String getBaremetalMaasKey() { + return baremetalMaasKey; + } + + public String getBaremetalMaasPool() { + return baremetalMaasPool; + } + @Override public ApiCommandResourceType getApiResourceType() { return ApiCommandResourceType.Cluster; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java index b628ce44f1aa..91f05fd23f2b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java @@ -144,6 +144,18 @@ public class CreateDiskOfferingCmd extends BaseCmd { @Parameter(name = ApiConstants.MAX_IOPS, type = CommandType.LONG, required = false, description = "max iops of the disk offering") private Long maxIops; + @Parameter(name = ApiConstants.MIN_IOPS_PER_GB, type = CommandType.LONG, required = false, description = "IOPS/GB rate for min IOPS. miniops = size * miniopspergb") + private Long minIopsPerGb; + + @Parameter(name = ApiConstants.MAX_IOPS_PER_GB, type = CommandType.LONG, required = false, description = "IOPS/GB rate for max IOPS. maxiops = size * maxiopspergb") + private Long maxIopsPerGb; + + @Parameter(name = ApiConstants.HIGHEST_MIN_IOPS, type = CommandType.LONG, required = false, description = "Highest Min IOPS value that is allowed for this offering") + private Long highestMinIops; + + @Parameter(name = ApiConstants.HIGHEST_MAX_IOPS, type = CommandType.LONG, required = false, description = "Highest Max IOPS value that is allowed for this offering") + private Long highestMaxIops; + @Parameter(name = ApiConstants.HYPERVISOR_SNAPSHOT_RESERVE, type = CommandType.INTEGER, required = false, @@ -310,6 +322,21 @@ public boolean getDiskSizeStrictness() { return diskSizeStrictness != null ? diskSizeStrictness : false; } + public Long getMinIopsPerGb() { + return minIopsPerGb; + } + + public Long getMaxIopsPerGb() { + return maxIopsPerGb; + } + + public Long getHighestMinIops() { + return highestMinIops; + } + + public Long getHighestMaxIops() { + return highestMaxIops; + } ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCSourceNATCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCSourceNATCmd.java new file mode 100644 index 000000000000..2d3a9db08841 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCSourceNATCmd.java @@ -0,0 +1,88 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.admin.vpc; + +import com.cloud.event.EventTypes; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.user.Account; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.api.response.VpcResponse; +import org.apache.log4j.Logger; + +@APICommand(name = "updateVPCSourceNAT", description = "Updates VPC Source NAT", responseObject = SuccessResponse.class, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +public class UpdateVPCSourceNATCmd extends BaseAsyncCmd { + public static final Logger s_logger = Logger.getLogger(UpdateVPCSourceNATCmd.class.getName()); + private static final String s_name = "updatevpcsourcenatresponse"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = VpcResponse.class, required = true, description = "the UUID of the VPC") + private long id; + + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public long getId() { + return id; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public void execute() throws InsufficientCapacityException, ResourceUnavailableException { + boolean result = _vpcService.updateVpcSourceNAT(getId()); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update vpc source nat."); + } + } + + @Override + public String getEventType() { + return EventTypes.EVENT_VPC_SOURCE_NAT_UPDATE; + } + + @Override + public String getEventDescription() { + return "Updating VPC Source NAT id=" + getId(); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java index 255b11aaa248..ada0ee0634b4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java @@ -104,9 +104,12 @@ public class RegisterTemplateCmd extends BaseCmd implements UserCmd { @Parameter(name = ApiConstants.REQUIRES_HVM, type = CommandType.BOOLEAN, description = "true if this template requires HVM") private Boolean requiresHvm; + @Parameter(name = ApiConstants.BOOT_FILENAME, type = CommandType.STRING, description = "PXE boot filename on the TFTP server.") + private String bootFilename; + @Parameter(name = ApiConstants.URL, type = CommandType.STRING, - required = true, + required = false, length = 2048, description = "the URL of where the template is hosted. Possible URL include http:// and https://") private String url; @@ -219,6 +222,10 @@ public Boolean getRequiresHvm() { return requiresHvm; } + public String getBootFilename() { + return bootFilename; + } + public String getUrl() { return url; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java index 8f6568fbe594..45dcad6a9aef 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java @@ -83,6 +83,9 @@ public class CreateVPCCmd extends BaseAsyncCreateCmd implements UserCmd { required = true, description = "the ID of the VPC offering") private Long vpcOffering; + @Parameter(name = ApiConstants.NETWORK_BOOT_IP, type = CommandType.STRING, description = "the network boot ip of the VPC.") + private String networkBootIp; + @Parameter(name = ApiConstants.NETWORK_DOMAIN, type = CommandType.STRING, description = "VPC network domain. All networks inside the VPC will belong to this domain") private String networkDomain; @@ -127,6 +130,8 @@ public Long getVpcOffering() { return vpcOffering; } + public String getNetworkBootIp() { return networkBootIp; } + public String getNetworkDomain() { return networkDomain; } @@ -144,7 +149,7 @@ public Boolean getDisplayVpc() { @Override public void create() throws ResourceAllocationException { - Vpc vpc = _vpcService.createVpc(getZoneId(), getVpcOffering(), getEntityOwnerId(), getVpcName(), getDisplayText(), getCidr(), getNetworkDomain(), getDisplayVpc()); + Vpc vpc = _vpcService.createVpc(getZoneId(), getVpcOffering(), getEntityOwnerId(), getVpcName(), getDisplayText(), getCidr(), getNetworkDomain(), getDisplayVpc(), getNetworkBootIp()); if (vpc != null) { setEntityId(vpc.getId()); setEntityUuid(vpc.getUuid()); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListVPCsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListVPCsCmd.java index b230603f852f..0f01cb05c2d0 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListVPCsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListVPCsCmd.java @@ -19,8 +19,6 @@ import java.util.ArrayList; import java.util.List; -import com.cloud.server.ResourceIcon; -import com.cloud.server.ResourceTag; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -36,6 +34,8 @@ import org.apache.log4j.Logger; import com.cloud.network.vpc.Vpc; +import com.cloud.server.ResourceIcon; +import com.cloud.server.ResourceTag; import com.cloud.utils.Pair; @@ -67,6 +67,9 @@ public class ListVPCsCmd extends BaseListTaggedResourcesCmd implements UserCmd { @Parameter(name = ApiConstants.VPC_OFF_ID, type = CommandType.UUID, entityType = VpcOfferingResponse.class, description = "list by ID of the VPC offering") private Long VpcOffId; + @Parameter(name = ApiConstants.NETWORK_BOOT_IP, type = CommandType.STRING, description = "the network boot ip of the VPC.") + private String networkBootIp; + @Parameter(name = ApiConstants.SUPPORTED_SERVICES, type = CommandType.LIST, collectionType = CommandType.STRING, description = "list VPC supporting certain services") private List supportedServices; @@ -107,6 +110,8 @@ public Long getVpcOffId() { return VpcOffId; } + public String getNetworkBootIp() { return networkBootIp; } + public Long getId() { return id; } @@ -144,7 +149,7 @@ public void execute() { Pair, Integer> vpcs = _vpcService.listVpcs(getId(), getVpcName(), getDisplayText(), getSupportedServices(), getCidr(), getVpcOffId(), getState(), getAccountName(), getDomainId(), getKeyword(), getStartIndex(), getPageSizeVal(), getZoneId(), isRecursive(), listAll(), getRestartRequired(), getTags(), - getProjectId(), getDisplay()); + getProjectId(), getDisplay(), getNetworkBootIp()); ListResponse response = new ListResponse(); List vpcResponses = new ArrayList(); for (Vpc vpc : vpcs.first()) { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/RestartVPCCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/RestartVPCCmd.java index a3fd58aa0704..a9aa3e3c2d1b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/RestartVPCCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/RestartVPCCmd.java @@ -52,6 +52,9 @@ public class RestartVPCCmd extends BaseAsyncCmd { @Parameter(name = ApiConstants.CLEANUP, type = CommandType.BOOLEAN, required = false, description = "If cleanup old network elements") private Boolean cleanup = false; + @Parameter(name = ApiConstants.MIGRATE_VPN, type = CommandType.BOOLEAN, required = false, description = "If migrate remote access VPN config") + private Boolean mirgateVpn; + @Parameter(name = ApiConstants.MAKEREDUNDANT, type = CommandType.BOOLEAN, required = false, description = "Turn a single VPC into a redundant one.") private Boolean makeredundant = false; @@ -72,6 +75,13 @@ public Boolean getCleanup() { return cleanup; } + public Boolean isMigrateVpn() { + if (mirgateVpn != null) { + return mirgateVpn; + } + return true; + } + public Boolean getMakeredundant() { return makeredundant; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/UpdateVPCCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/UpdateVPCCmd.java index 190a0e5cbc5d..909b86a6ae99 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/UpdateVPCCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/UpdateVPCCmd.java @@ -59,6 +59,9 @@ public class UpdateVPCCmd extends BaseAsyncCustomIdCmd implements UserCmd { @Parameter(name = ApiConstants.FOR_DISPLAY, type = CommandType.BOOLEAN, description = "an optional field, whether to the display the vpc to the end user or not", since = "4.4", authorized = {RoleType.Admin}) private Boolean display; + @Parameter(name = ApiConstants.NETWORK_BOOT_IP, type = CommandType.STRING, description = "the network boot ip of the VPC.") + private String networkBootIp; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -79,6 +82,8 @@ public Boolean isDisplayVpc() { return display; } + public String getNetworkBootIp() { return networkBootIp; } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -99,7 +104,7 @@ public long getEntityOwnerId() { @Override public void execute() { - Vpc result = _vpcService.updateVpc(getId(), getVpcName(), getDisplayText(), getCustomId(), isDisplayVpc()); + Vpc result = _vpcService.updateVpc(getId(), getVpcName(), getDisplayText(), getCustomId(), isDisplayVpc(), getNetworkBootIp()); if (result != null) { VpcResponse response = _responseGenerator.createVpcResponse(getResponseView(), result); response.setResponseName(getCommandName()); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java index b6ea5cc426cc..dc9af9e1de49 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java @@ -34,6 +34,7 @@ import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.RemoteAccessVpnException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.IpAddress; import com.cloud.network.RemoteAccessVpn; @@ -157,6 +158,10 @@ public void create() { s_logger.info("Network rule conflict: " + e.getMessage()); s_logger.trace("Network Rule Conflict: ", e); throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, e.getMessage()); + } catch (RemoteAccessVpnException e) { + s_logger.info("Create vpn internal error: " + e.getMessage()); + s_logger.trace("Create vpn internal error: ", e); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnCaCertificateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnCaCertificateCmd.java new file mode 100644 index 000000000000..88b35f2156ac --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnCaCertificateCmd.java @@ -0,0 +1,104 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.vpn; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.response.CertificateResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.pki.PkiDetail; +import org.apache.cloudstack.pki.PkiManager; + +import com.cloud.domain.Domain; +import com.cloud.exception.RemoteAccessVpnException; +import com.cloud.user.DomainService; +import com.cloud.utils.exception.CloudRuntimeException; + +/** + * @author Khosrow Moossavi + * @since 4.10.0.228-cca + */ +@APICommand(name = ListVpnCaCertificateCmd.APINAME, + description = "Lists the CA public certificate(s) as support by the configured/provided CA plugin", + responseObject = CertificateResponse.class, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + since = "4.10.0.228-cca", + authorized = { + RoleType.Admin, + RoleType.ResourceAdmin, + RoleType.DomainAdmin, + RoleType.User +}) +public class ListVpnCaCertificateCmd extends BaseCmd { + public static final String APINAME = "listVpnCaCertificate"; + + @Inject + private DomainService domainService; + + @Inject + private PkiManager pkiManager; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.DOMAIN, type = CommandType.STRING, description = "Name of the CA service provider, otherwise the default configured provider plugin will be used") + private String domain; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public String getDomain() { + return domain; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() { + final PkiDetail certificate; + try { + Domain domain = domainService.getDomain(getDomain()); + certificate = pkiManager.getCertificate(domain); + } catch (final RemoteAccessVpnException e) { + throw new CloudRuntimeException("Failed to get CA certificates for given domain"); + } + final CertificateResponse certificateResponse = new CertificateResponse("cacertificates"); + certificateResponse.setCertificate(certificate.getIssuingCa()); + certificateResponse.setResponseName(getCommandName()); + setResponseObject(certificateResponse); + } + + @Override + public String getCommandName() { + return APINAME.toLowerCase() + BaseCmd.RESPONSE_SUFFIX; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccountId(); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/CertificateResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/CertificateResponse.java index f8c3ecc74044..292d45d9710b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/CertificateResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/CertificateResponse.java @@ -23,6 +23,9 @@ import com.cloud.serializer.Param; import com.google.gson.annotations.SerializedName; +/** + * @since 4.10.0.228-cca + */ public class CertificateResponse extends BaseResponse { @SerializedName(ApiConstants.CERTIFICATE) @Param(description = "The client certificate") diff --git a/api/src/main/java/org/apache/cloudstack/api/response/DiskOfferingResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/DiskOfferingResponse.java index 1bea164d359b..bee386193310 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/DiskOfferingResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/DiskOfferingResponse.java @@ -164,6 +164,22 @@ public class DiskOfferingResponse extends BaseResponseWithAnnotations { @Param(description = "additional key/value details tied with this disk offering", since = "4.17") private Map details; + @SerializedName(ApiConstants.MIN_IOPS_PER_GB) + @Param(description = "IOPS/GB rate for min IOPS. miniops = size * miniopspergb") + private Long minIopsPerGb; + + @SerializedName(ApiConstants.MAX_IOPS_PER_GB) + @Param(description = "IOPS/GB rate for max IOPS. miniops = size * miniopspergb") + private Long maxIopsPerGb; + + @SerializedName(ApiConstants.HIGHEST_MIN_IOPS) + @Param(description = "Highest Min IOPS value that is allowed for this offering") + private Long highestMinIops; + + @SerializedName(ApiConstants.HIGHEST_MAX_IOPS) + @Param(description = "Highest Max IOPS value that is allowed for this offering") + private Long highestMaxIops; + public Boolean getDisplayOffering() { return displayOffering; } @@ -289,6 +305,38 @@ public Integer getHypervisorSnapshotReserve() { return hypervisorSnapshotReserve; } + public Long getMinIopsPerGb() { + return minIopsPerGb; + } + + public void setMinIopsPerGb(Long minIopsPerGb) { + this.minIopsPerGb = minIopsPerGb; + } + + public Long getMaxIopsPerGb() { + return maxIopsPerGb; + } + + public void setMaxIopsPerGb(Long maxIopsPerGb) { + this.maxIopsPerGb = maxIopsPerGb; + } + + public Long getHighestMinIops() { + return highestMinIops; + } + + public void setHighestMinIops(Long highestMinIops) { + this.highestMinIops = highestMinIops; + } + + public Long getHighestMaxIops() { + return highestMaxIops; + } + + public void setHighestMaxIops(Long highestMaxIops) { + this.highestMaxIops = highestMaxIops; + } + public void setHypervisorSnapshotReserve(Integer hypervisorSnapshotReserve) { this.hypervisorSnapshotReserve = hypervisorSnapshotReserve; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/RemoteAccessVpnResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/RemoteAccessVpnResponse.java index 0e078bea5bd7..baf2e7623f36 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/RemoteAccessVpnResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/RemoteAccessVpnResponse.java @@ -77,6 +77,14 @@ public class RemoteAccessVpnResponse extends BaseResponse implements ControlledE @Param(description = "is vpn for display to the regular user", since = "4.4", authorized = {RoleType.Admin}) private Boolean forDisplay; + @SerializedName(ApiConstants.TYPE) + @Param(description = "the type of remote access vpn implementation") + private String type; + + @SerializedName(ApiConstants.CERTIFICATE) + @Param(description = "the client certificate") + private String certificate; + public void setPublicIp(String publicIp) { this.publicIp = publicIp; } @@ -129,4 +137,12 @@ public void setId(String id) { public void setForDisplay(Boolean forDisplay) { this.forDisplay = forDisplay; } + + public void setType(String type) { + this.type = type; + } + + public void setCertificate(String certificate) { + this.certificate = certificate; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java index 892b5b85262d..d4961103dcbb 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java @@ -227,6 +227,10 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements @Param(description = "Base64 string representation of the resource icon", since = "4.16.0.0") ResourceIconResponse icon; + @SerializedName(ApiConstants.BOOT_FILENAME) + @Param(description = "The boot file name to use when pxe booting.") + private String bootFilename; + public TemplateResponse() { tags = new LinkedHashSet<>(); } @@ -467,4 +471,12 @@ public void setUrl(String url) { public void setResourceIconResponse(ResourceIconResponse icon) { this.icon = icon; } + + public String getBootFilename() { + return bootFilename; + } + + public void setBootFilename(String bootFilename) { + this.bootFilename = bootFilename; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/VpcResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/VpcResponse.java index 3b5661f8a80c..d34969bfb42b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/VpcResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/VpcResponse.java @@ -72,6 +72,10 @@ public class VpcResponse extends BaseResponseWithAnnotations implements Controll @Param(description = "vpc offering name the VPC is created from", since = "4.13.2") private String vpcOfferingName; + @SerializedName(ApiConstants.NETWORK_BOOT_IP) + @Param(description = "The network boot ip of VPC") + private String networkBootIp; + @SerializedName(ApiConstants.CREATED) @Param(description = "the date this VPC was created") private Date created; @@ -205,6 +209,8 @@ public void setVpcOfferingName(final String vpcOfferingName) { this.vpcOfferingName = vpcOfferingName; } + public void setNetworkBootIp(String networkBootIp) { this.networkBootIp = networkBootIp; } + public List getNetworks() { return networks; } diff --git a/api/src/main/java/org/apache/cloudstack/pki/PkiDetail.java b/api/src/main/java/org/apache/cloudstack/pki/PkiDetail.java new file mode 100644 index 000000000000..9c7b1a592e80 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/pki/PkiDetail.java @@ -0,0 +1,74 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.pki; + +/** + * @author Khosrow Moossavi + * @since 4.10.0.228-cca + */ +public class PkiDetail { + private String certificate; + private String issuingCa; + private String privateKey; + private String privateKeyType; + private String serialNumber; + + public PkiDetail certificate(final String certificate) { + this.certificate = certificate; + return this; + } + + public PkiDetail issuingCa(final String issuingCa) { + this.issuingCa = issuingCa; + return this; + } + + public PkiDetail privateKey(final String privateKey) { + this.privateKey = privateKey; + return this; + } + + public PkiDetail privateKeyType(final String privateKeyType) { + this.privateKeyType = privateKeyType; + return this; + } + + public PkiDetail serialNumber(final String serialNumber) { + this.serialNumber = serialNumber; + return this; + } + + public String getCertificate() { + return certificate; + } + + public String getIssuingCa() { + return issuingCa; + } + + public String getPrivateKey() { + return privateKey; + } + + public String getPrivateKeyType() { + return privateKeyType; + } + + public String getSerialNumber() { + return serialNumber; + } +} \ No newline at end of file diff --git a/api/src/main/java/org/apache/cloudstack/pki/PkiManager.java b/api/src/main/java/org/apache/cloudstack/pki/PkiManager.java new file mode 100644 index 000000000000..b3686ba7dd78 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/pki/PkiManager.java @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.pki; + +import com.cloud.domain.Domain; +import com.cloud.exception.RemoteAccessVpnException; +import com.cloud.utils.net.Ip; + +/** + * @author Khosrow Moossavi + * @since 4.10.0.228-cca + */ +public interface PkiManager { + String CREDENTIAL_ISSUING_CA = "credential.issuing.ca"; + String CREDENTIAL_SERIAL_NUMBER = "credential.serial.number"; + String CREDENTIAL_CERTIFICATE = "credential.certificate"; + String CREDENTIAL_PRIVATE_KEY = "credential.private.key"; + + /** + * Issue a Certificate for specific IP and specific Domain act as the CA + * + * @param domain object to extract name and id to be used to issuing CA + * @param publicIp to be included in the certificate + * + * @return detail about just signed PKI, including issuing CA, certificate, private key and serial number + * + * @throws RemoteAccessVpnException + */ + PkiDetail issueCertificate(Domain domain, Ip publicIp) throws RemoteAccessVpnException; + + /** + * Get a Certificate for specific Domain act as the CA + * + * @param domain object to extract its id to be find the issuing CA + * + * @return details about signed PKI, including issuing CA, certificate and serial number + * + * @throws RemoteAccessVpnException + */ + PkiDetail getCertificate(Domain domain) throws RemoteAccessVpnException; +} diff --git a/core/src/main/java/com/cloud/agent/api/baremetal/DestroyCommand.java b/core/src/main/java/com/cloud/agent/api/baremetal/DestroyCommand.java new file mode 100644 index 000000000000..6440b6f9f844 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/baremetal/DestroyCommand.java @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +package com.cloud.agent.api.baremetal; + +import com.cloud.agent.api.Command; +import com.cloud.agent.api.to.VirtualMachineTO; + +public class DestroyCommand extends Command { + + VirtualMachineTO vm; + boolean executeInSequence; + + public DestroyCommand(VirtualMachineTO vm, boolean executeInSequence) { + this.vm = vm; + this.executeInSequence = executeInSequence; + } + + @Override + public boolean executeInSequence() { + + if (vm.getName() != null && vm.getName().startsWith("r-")) { + return false; + } + return executeInSequence; + } + + public VirtualMachineTO getVm() { + return vm; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/routing/DhcpEntryCommand.java b/core/src/main/java/com/cloud/agent/api/routing/DhcpEntryCommand.java index 7fb65fe15cf9..06aab4a6e14c 100644 --- a/core/src/main/java/com/cloud/agent/api/routing/DhcpEntryCommand.java +++ b/core/src/main/java/com/cloud/agent/api/routing/DhcpEntryCommand.java @@ -37,6 +37,9 @@ public class DhcpEntryCommand extends NetworkElementCommand { boolean executeInSequence = false; boolean remove; + private String bootFilename; + private String networkBootIp; + public boolean isRemove() { return remove; } @@ -152,4 +155,20 @@ public boolean isDefault() { public void setDefault(boolean isDefault) { this.isDefault = isDefault; } + + public String getBootFilename() { + return bootFilename; + } + + public void setBootFilename(String bootFilename) { + this.bootFilename = bootFilename; + } + + public String getNetworkBootIp() { + return networkBootIp; + } + + public void setNetworkBootIp(String networkBootIp) { + this.networkBootIp = networkBootIp; + } } diff --git a/core/src/main/java/com/cloud/agent/api/routing/RemoteAccessVpnCfgCommand.java b/core/src/main/java/com/cloud/agent/api/routing/RemoteAccessVpnCfgCommand.java index c7dabe5b14d8..cb85597fbd1b 100644 --- a/core/src/main/java/com/cloud/agent/api/routing/RemoteAccessVpnCfgCommand.java +++ b/core/src/main/java/com/cloud/agent/api/routing/RemoteAccessVpnCfgCommand.java @@ -30,6 +30,12 @@ public class RemoteAccessVpnCfgCommand extends NetworkElementCommand { private String localCidr; private String publicInterface; + // items related to VPN IKEv2 implementation + private String vpnType; + private String caCert; + private String serverCert; + private String serverKey; + protected RemoteAccessVpnCfgCommand() { this.create = false; } @@ -43,7 +49,7 @@ public boolean executeInSequence() { return true; } - public RemoteAccessVpnCfgCommand(boolean create, String vpnServerAddress, String localIp, String ipRange, String ipsecPresharedKey, boolean vpcEnabled) { + public RemoteAccessVpnCfgCommand(boolean create, String vpnServerAddress, String localIp, String ipRange, String ipsecPresharedKey, boolean vpcEnabled, String vpnType, String caCert, String serverCert, String serverKey) { this.vpnServerIp = vpnServerAddress; this.ipRange = ipRange; this.presharedKey = ipsecPresharedKey; @@ -55,6 +61,10 @@ public RemoteAccessVpnCfgCommand(boolean create, String vpnServerAddress, String } else { this.setPublicInterface("eth2"); } + this.vpnType = vpnType; + this.caCert = caCert; + this.serverCert = serverCert; + this.serverKey = serverKey; } public String getVpnServerIp() { @@ -109,4 +119,36 @@ public void setPublicInterface(String publicInterface) { this.publicInterface = publicInterface; } + public String getVpnType() { + return vpnType; + } + + public void setVpnType(String vpnType) { + this.vpnType = vpnType; + } + + public String getCaCert() { + return caCert; + } + + public void setCaCert(String caCert) { + this.caCert = caCert; + } + + public String getServerCert() { + return serverCert; + } + + public void setServerCert(String serverCert) { + this.serverCert = serverCert; + } + + public String getServerKey() { + return serverKey; + } + + public void setServerKey(String serverKey) { + this.serverKey = serverKey; + } + } diff --git a/core/src/main/java/com/cloud/agent/api/routing/VpnUsersCfgCommand.java b/core/src/main/java/com/cloud/agent/api/routing/VpnUsersCfgCommand.java index 3510d14fad52..7ca4bd72ace3 100644 --- a/core/src/main/java/com/cloud/agent/api/routing/VpnUsersCfgCommand.java +++ b/core/src/main/java/com/cloud/agent/api/routing/VpnUsersCfgCommand.java @@ -79,12 +79,13 @@ public String getUsernamePassword() { } UsernamePassword[] userpwds; + private String vpnType; protected VpnUsersCfgCommand() { } - public VpnUsersCfgCommand(List addUsers, List removeUsers) { + public VpnUsersCfgCommand(List addUsers, List removeUsers, String vpnType) { userpwds = new UsernamePassword[addUsers.size() + removeUsers.size()]; int i = 0; for (VpnUser vpnUser : removeUsers) { @@ -93,6 +94,8 @@ public VpnUsersCfgCommand(List addUsers, List removeUsers) { for (VpnUser vpnUser : addUsers) { userpwds[i++] = new UsernamePassword(vpnUser.getUsername(), vpnUser.getPassword(), true); } + + this.vpnType = vpnType; } @Override @@ -104,4 +107,7 @@ public UsernamePassword[] getUserpwds() { return userpwds; } + public String getVpnType() { + return vpnType; + } } diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/DhcpEntryConfigItem.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/DhcpEntryConfigItem.java index 0710ecc1dcd5..01f86790824b 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/DhcpEntryConfigItem.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/DhcpEntryConfigItem.java @@ -37,6 +37,9 @@ public List generateConfig(final NetworkElementCommand cmd) { final VmDhcpConfig vmDhcpConfig = new VmDhcpConfig(command.getVmName(), command.getVmMac(), command.getVmIpAddress(), command.getVmIp6Address(), command.getDuid(), command.getDefaultDns(), command.getDefaultRouter(), command.getStaticRoutes(), command.isDefault(), command.isRemove()); + vmDhcpConfig.setBootFilename(command.getBootFilename()); + vmDhcpConfig.setNetworkBootIp(command.getNetworkBootIp()); + return generateConfigItems(vmDhcpConfig); } diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/RemoteAccessVpnConfigItem.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/RemoteAccessVpnConfigItem.java index be51c30745b0..3586eecdbb95 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/RemoteAccessVpnConfigItem.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/RemoteAccessVpnConfigItem.java @@ -32,10 +32,21 @@ public class RemoteAccessVpnConfigItem extends AbstractConfigItemFacade { @Override public List generateConfig(final NetworkElementCommand cmd) { - final RemoteAccessVpnCfgCommand command = (RemoteAccessVpnCfgCommand) cmd; + final RemoteAccessVpnCfgCommand command = (RemoteAccessVpnCfgCommand)cmd; + + final RemoteAccessVpn remoteAccessVpn = new RemoteAccessVpn( + command.isCreate(), + command.getIpRange(), + command.getPresharedKey(), + command.getVpnServerIp(), + command.getLocalIp(), + command.getLocalCidr(), + command.getPublicInterface(), + command.getVpnType(), + command.getCaCert(), + command.getServerCert(), + command.getServerKey()); - final RemoteAccessVpn remoteAccessVpn = new RemoteAccessVpn(command.isCreate(), command.getIpRange(), command.getPresharedKey(), command.getVpnServerIp(), command.getLocalIp(), command.getLocalCidr(), - command.getPublicInterface()); return generateConfigItems(remoteAccessVpn); } diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/VpnUsersConfigItem.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/VpnUsersConfigItem.java index c98a93e2d3d0..2dd87c6c1810 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/VpnUsersConfigItem.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/VpnUsersConfigItem.java @@ -41,7 +41,7 @@ public List generateConfig(final NetworkElementCommand cmd) { vpnUsers.add(new VpnUser(userpwd.getUsername(), userpwd.getPassword(), userpwd.isAdd())); } - final VpnUserList vpnUserList = new VpnUserList(vpnUsers); + final VpnUserList vpnUserList = new VpnUserList(vpnUsers, command.getVpnType()); return generateConfigItems(vpnUserList); } diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/RemoteAccessVpn.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/RemoteAccessVpn.java index 5b5c05bf7fd7..e025b915814c 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/RemoteAccessVpn.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/RemoteAccessVpn.java @@ -24,11 +24,17 @@ public class RemoteAccessVpn extends ConfigBase { public boolean create; public String ipRange, presharedKey, vpnServerIp, localIp, localCidr, publicInterface; + // items related to VPN IKEv2 implementation + private String vpnType; + private String caCert; + private String serverCert; + private String serverKey; + public RemoteAccessVpn() { super(ConfigBase.REMOTEACCESSVPN); } - public RemoteAccessVpn(boolean create, String ipRange, String presharedKey, String vpnServerIp, String localIp, String localCidr, String publicInterface) { + public RemoteAccessVpn(boolean create, String ipRange, String presharedKey, String vpnServerIp, String localIp, String localCidr, String publicInterface, String vpnType, String caCert, String serverCert, String serverKey) { super(ConfigBase.REMOTEACCESSVPN); this.create = create; this.ipRange = ipRange; @@ -37,6 +43,10 @@ public RemoteAccessVpn(boolean create, String ipRange, String presharedKey, Stri this.localIp = localIp; this.localCidr = localCidr; this.publicInterface = publicInterface; + this.vpnType = vpnType; + this.caCert = caCert; + this.serverCert = serverCert; + this.serverKey = serverKey; } public boolean isCreate() { @@ -95,4 +105,36 @@ public void setPublicInterface(String publicInterface) { this.publicInterface = publicInterface; } + public String getVpnType() { + return vpnType; + } + + public void setVpnType(String vpnType) { + this.vpnType = vpnType; + } + + public String getCaCert() { + return caCert; + } + + public void setCaCert(String caCert) { + this.caCert = caCert; + } + + public String getServerCert() { + return serverCert; + } + + public void setServerCert(String serverCert) { + this.serverCert = serverCert; + } + + public String getServerKey() { + return serverKey; + } + + public void setServerKey(String serverKey) { + this.serverKey = serverKey; + } + } diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/VmDhcpConfig.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/VmDhcpConfig.java index d9cb8b0b2645..f4f13048a570 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/VmDhcpConfig.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/VmDhcpConfig.java @@ -30,6 +30,9 @@ public class VmDhcpConfig extends ConfigBase { private String staticRoutes; private boolean defaultEntry; + private String bootFilename; + private String networkBootIp; + // Indicate if the entry should be removed when set to true private boolean remove; @@ -132,4 +135,19 @@ public void setDefaultEntry(boolean defaultEntry) { this.defaultEntry = defaultEntry; } + public String getBootFilename() { + return bootFilename; + } + + public void setBootFilename(String bootFilename) { + this.bootFilename = bootFilename; + } + + public String getNetworkBootIp() { + return networkBootIp; + } + + public void setNetworkBootIp(String networkBootIp) { + this.networkBootIp = networkBootIp; + } } diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/VpnUserList.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/VpnUserList.java index 115fcc9bd1ef..b3e5c0e5df47 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/VpnUserList.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/VpnUserList.java @@ -23,14 +23,16 @@ public class VpnUserList extends ConfigBase { private List vpnUsers; + private String vpnType; public VpnUserList() { super(ConfigBase.VPN_USER_LIST); } - public VpnUserList(List vpnUsers) { + public VpnUserList(List vpnUsers, String vpnType) { super(ConfigBase.VPN_USER_LIST); this.vpnUsers = vpnUsers; + this.vpnType = vpnType; } public List getVpnUsers() { @@ -41,4 +43,11 @@ public void setVpnUsers(List vpnUsers) { this.vpnUsers = vpnUsers; } + public String getVpnType() { + return vpnType; + } + + public void setVpnType(String vpnType) { + this.vpnType = vpnType; + } } diff --git a/core/src/main/java/com/cloud/network/HAProxyConfigurator.java b/core/src/main/java/com/cloud/network/HAProxyConfigurator.java index 98eeef8d3aad..5632b5730d9c 100644 --- a/core/src/main/java/com/cloud/network/HAProxyConfigurator.java +++ b/core/src/main/java/com/cloud/network/HAProxyConfigurator.java @@ -511,6 +511,9 @@ private List getRulesForPool(final LoadBalancerTO lbTO, final boolean ke if(lbTO.getLbProtocol() != null && lbTO.getLbProtocol().equals("tcp-proxy")) { sb.append(" send-proxy"); } + else if(lbTO.getLbProtocol() != null && lbTO.getLbProtocol().equals("tcp-proxy-v2")) { + sb.append(" send-proxy-v2"); + } dstSubRule.add(sb.toString()); if (stickinessSubRule != null) { sb.append(" cookie ").append(dest.getDestIp().replace(".", "_")).append('-').append(dest.getDestPort()).toString(); diff --git a/core/src/main/java/com/cloud/storage/template/SwiftVolumeDownloader.java b/core/src/main/java/com/cloud/storage/template/SwiftVolumeDownloader.java new file mode 100644 index 000000000000..01d3be5b99cd --- /dev/null +++ b/core/src/main/java/com/cloud/storage/template/SwiftVolumeDownloader.java @@ -0,0 +1,396 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.storage.template; + +import com.cloud.agent.api.to.SwiftTO; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.storage.command.DownloadCommand; +import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType; +import org.apache.commons.codec.digest.DigestUtils; +import org.apache.http.Header; +import org.apache.http.HttpEntityEnclosingRequest; +import org.apache.http.HttpRequest; +import org.apache.http.HttpResponse; +import org.apache.http.client.HttpRequestRetryHandler; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.http.conn.ConnectTimeoutException; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.TrustStrategy; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.protocol.HttpContext; +import org.apache.http.ssl.SSLContextBuilder; +import org.apache.log4j.Logger; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLException; +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InterruptedIOException; +import java.net.UnknownHostException; +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.util.Date; + +/** + * Download a volume file using HTTP(S) + * + * This class, once instantiated, has the purpose to download a Volume to staging nfs or cache when using as Swift Image Store. + * + * Execution of the instance is started when runInContext() is called. + */ +public class SwiftVolumeDownloader extends ManagedContextRunnable implements TemplateDownloader { + private static final Logger LOGGER = Logger.getLogger(SwiftVolumeDownloader.class.getName()); + private static final int DOWNLOAD_BUFFER_SIZE_BYTES = 1024* 1024; + + private final String downloadUrl; + private final String fileName; + private final String fileExtension; + private final long volumeId; + private final CloseableHttpClient httpClient; + private final HttpGet httpGet; + private final DownloadCompleteCallback downloadCompleteCallback; + private final SwiftTO swiftTO; + private String errorString = ""; + private Status status = Status.NOT_STARTED; + private final ResourceType resourceType = ResourceType.VOLUME; + private long remoteSize; + private String md5sum; + private long downloadTime; + private long totalBytes; + private final long maxVolumeSizeInBytes; + private final String installPathPrefix; + private final String installPath; + private File volumeFile; + private boolean resume = false; + + public SwiftVolumeDownloader(DownloadCommand cmd, DownloadCompleteCallback downloadCompleteCallback, long maxVolumeSizeInBytes, String installPathPrefix) { + this.downloadUrl = cmd.getUrl(); + this.swiftTO = (SwiftTO) cmd.getDataStore(); + this.maxVolumeSizeInBytes = maxVolumeSizeInBytes; + this.httpClient = initializeHttpClient(); + this.downloadCompleteCallback = downloadCompleteCallback; + this.fileName = cmd.getName(); + this.fileExtension = cmd.getFormat().getFileExtension(); + this.volumeId = cmd.getId(); + this.installPathPrefix = installPathPrefix; + this.installPath = cmd.getInstallPath(); + this.httpGet = new HttpGet(downloadUrl); + } + + private CloseableHttpClient initializeHttpClient(){ + + CloseableHttpClient client = null; + try { + //trust all certs + SSLContext sslContext = new SSLContextBuilder() + .loadTrustMaterial(null, (TrustStrategy) (chain, authType) -> true) + .build(); + client = HttpClients.custom().setSSLContext(sslContext) + .setSSLHostnameVerifier(new NoopHostnameVerifier()) + .setRetryHandler(buildRetryHandler(5)) + .build(); + } catch (NoSuchAlgorithmException e) { + e.printStackTrace(); + } catch (KeyManagementException e) { + e.printStackTrace(); + } catch (KeyStoreException e) { + e.printStackTrace(); + } + + return client; + } + + private HttpRequestRetryHandler buildRetryHandler(int retryCount){ + + HttpRequestRetryHandler customRetryHandler = new HttpRequestRetryHandler() { + @Override + public boolean retryRequest( + IOException exception, + int executionCount, + HttpContext context) { + if (executionCount >= retryCount) { + // Do not retry if over max retry count + return false; + } + if (exception instanceof InterruptedIOException) { + // Timeout + return false; + } + if (exception instanceof UnknownHostException) { + // Unknown host + return false; + } + if (exception instanceof ConnectTimeoutException) { + // Connection refused + return false; + } + if (exception instanceof SSLException) { + // SSL handshake exception + return false; + } + HttpClientContext clientContext = HttpClientContext.adapt(context); + HttpRequest request = clientContext.getRequest(); + boolean idempotent = !(request instanceof HttpEntityEnclosingRequest); + if (idempotent) { + // Retry if the request is considered idempotent + return true; + } + return false; + } + + }; + return customRetryHandler; + } + + @Override + public long download(boolean resume, DownloadCompleteCallback callback) { + if (!status.equals(Status.NOT_STARTED)) { + // Only start downloading if we haven't started yet. + LOGGER.info("Volume download is already started, not starting again. Volume: " + downloadUrl); + return 0; + } + + HttpResponse response = null; + try { + response = httpClient.execute(httpGet); + } catch (IOException e) { + e.printStackTrace(); + errorString = "Exception while executing HttpMethod " + httpGet.getMethod() + " on URL " + downloadUrl + " " + + response.getStatusLine().getStatusCode() + " " + response.getStatusLine().getReasonPhrase(); + LOGGER.error(errorString); + status = Status.UNRECOVERABLE_ERROR; + return 0; + } + + // Headers + long contentLength = response.getEntity().getContentLength(); + Header contentType = response.getEntity().getContentType(); + + // Check the contentLengthHeader and transferEncodingHeader. + if (contentLength <= 0) { + errorString = "The Content Length of " + downloadUrl + " is <= 0 and content Type is "+contentType.toString(); + LOGGER.error(errorString); + status = Status.UNRECOVERABLE_ERROR; + return 0; + } else { + // The ContentLengthHeader is supplied, parse it's value. + remoteSize = contentLength; + } + + if (remoteSize > maxVolumeSizeInBytes) { + errorString = "Remote size is too large for volume " + downloadUrl + " remote size is " + remoteSize + " max allowed is " + maxVolumeSizeInBytes; + LOGGER.error(errorString); + status = Status.UNRECOVERABLE_ERROR; + return 0; + } + + InputStream inputStream; + try { + inputStream = new BufferedInputStream(response.getEntity().getContent()); + } catch (IOException e) { + errorString = "Exception occurred while opening InputStream for volume from " + downloadUrl; + LOGGER.error(errorString); + status = Status.UNRECOVERABLE_ERROR; + return 0; + } + + String filePath = installPathPrefix + File.separator + installPath; + File directory = new File(filePath); + File srcFile = new File(filePath + File.separator + fileName); + try { + if (!directory.exists()) { + LOGGER.info("Creating directories "+filePath); + directory.mkdirs(); + } + if (!srcFile.createNewFile()) { + LOGGER.info("Reusing existing file " + srcFile.getPath()); + } + } catch (IOException e) { + errorString = "Exception occurred while creating temp file " + srcFile.getPath(); + LOGGER.error(errorString); + status = Status.UNRECOVERABLE_ERROR; + return 0; + } + + LOGGER.info("Starting download from " + downloadUrl + " to staging with size " + remoteSize + " bytes to "+filePath); + final Date downloadStart = new Date(); + + try (FileOutputStream fileOutputStream = new FileOutputStream(srcFile);) { + BufferedOutputStream outputStream = new BufferedOutputStream(fileOutputStream,DOWNLOAD_BUFFER_SIZE_BYTES); + byte[] data = new byte[DOWNLOAD_BUFFER_SIZE_BYTES]; + int bufferLength = 0; + while((bufferLength = inputStream.read(data,0,DOWNLOAD_BUFFER_SIZE_BYTES)) >= 0){ + totalBytes += bufferLength; + outputStream.write(data,0,bufferLength); + status = Status.IN_PROGRESS; + LOGGER.trace("Download in progress: " + getDownloadPercent() + "%"); + if(totalBytes >= remoteSize){ + volumeFile = srcFile; + status = Status.DOWNLOAD_FINISHED; + } + } + outputStream.close(); + inputStream.close(); + } catch (IOException e) { + LOGGER.error("Exception when downloading from url to staging nfs:" + e.getMessage(), e); + status = Status.RECOVERABLE_ERROR; + return 0; + } + + downloadTime = new Date().getTime() - downloadStart.getTime(); + + try (FileInputStream fs = new FileInputStream(srcFile)) { + md5sum = DigestUtils.md5Hex(fs); + } catch (IOException e) { + LOGGER.error("Failed to get md5sum: " + srcFile.getAbsoluteFile()); + } + + if (status == Status.DOWNLOAD_FINISHED) { + LOGGER.info("Template download from " + downloadUrl + " to staging nfs, transferred " + totalBytes + " in " + (downloadTime / 1000) + " seconds, completed successfully!"); + } else { + LOGGER.error("Template download from " + downloadUrl + " to staging nfs, transferred " + totalBytes + " in " + (downloadTime / 1000) + " seconds, completed with status " + status.toString()); + } + + // Close http connection + httpGet.releaseConnection(); + + // Call the callback! + if (callback != null) { + callback.downloadComplete(status); + } + + return totalBytes; + } + + public String getDownloadUrl() { + return httpGet.getURI().toString(); + } + + @Override + public Status getStatus() { + return status; + } + + @Override + public long getDownloadTime() { + return downloadTime; + } + + @Override + public long getDownloadedBytes() { + return totalBytes; + } + + @Override + public boolean stopDownload() { + switch (status) { + case IN_PROGRESS: + if (httpGet != null) { + httpGet.abort(); + } + break; + case UNKNOWN: + case NOT_STARTED: + case RECOVERABLE_ERROR: + case UNRECOVERABLE_ERROR: + case ABORTED: + case DOWNLOAD_FINISHED: + // Remove the object if it already has been uploaded. + // SwiftUtil.deleteObject(swiftTO, swiftPath); + break; + default: + break; + } + + status = Status.ABORTED; + return true; + } + + @Override + public int getDownloadPercent() { + if (remoteSize == 0) { + return 0; + } + + return (int) (100.0 * totalBytes / remoteSize); + } + + @Override + protected void runInContext() { + LOGGER.info("Starting download in managed context resume = " + resume + " callback = " + downloadCompleteCallback.toString()); + download(resume, downloadCompleteCallback); + } + + @Override + public void setStatus(Status status) { + this.status = status; + } + + public boolean isResume() { + return resume; + } + + @Override + public String getDownloadError() { + return errorString; + } + + @Override + public String getDownloadLocalPath() { + return installPath; + } + + @Override + public void setResume(boolean resume) { + this.resume = resume; + } + + @Override + public void setDownloadError(String error) { + errorString = error; + } + + @Override + public boolean isInited() { + return true; + } + + public ResourceType getResourceType() { + return resourceType; + } + + public String getFileExtension() { + return fileExtension; + } + + public String getMd5sum() { return md5sum; } + + public File getVolumeFile() { return volumeFile; } + + public long getMaxTemplateSizeInBytes() {return maxVolumeSizeInBytes;} +} \ No newline at end of file diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java index b184a74312b5..ccd0ee15a6a9 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java @@ -37,6 +37,7 @@ public class TemplateObjectTO implements DataTO { private long accountId; private String checksum; private boolean hvm; + private String bootFilename; private String displayText; private DataStoreTO imageDataStore; private String name; @@ -61,6 +62,7 @@ public TemplateObjectTO(VirtualMachineTemplate template) { this.displayText = template.getDisplayText(); this.checksum = template.getChecksum(); this.hvm = template.isRequiresHvm(); + this.bootFilename = template.getBootFilename(); this.accountId = template.getAccountId(); this.name = template.getUniqueName(); this.format = template.getFormat(); @@ -75,6 +77,7 @@ public TemplateObjectTO(TemplateInfo template) { this.displayText = template.getDisplayText(); this.checksum = template.getChecksum(); this.hvm = template.isRequiresHvm(); + this.bootFilename = template.getBootFilename(); this.accountId = template.getAccountId(); this.name = template.getUniqueName(); this.format = template.getFormat(); @@ -126,6 +129,14 @@ public void setRequiresHvm(boolean hvm) { this.hvm = hvm; } + public String getBootFilename() { + return bootFilename; + } + + public void setBootFilename(String bootFilename) { + this.bootFilename = bootFilename; + } + public String getDescription() { return displayText; } diff --git a/core/src/test/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResourceTest.java b/core/src/test/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResourceTest.java index 6eb30aeeed9e..2b7a0be8b7b8 100644 --- a/core/src/test/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResourceTest.java +++ b/core/src/test/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResourceTest.java @@ -545,21 +545,21 @@ public void testRemoteAccessVpnCfgCommand() { } protected RemoteAccessVpnCfgCommand generateRemoteAccessVpnCfgCommand1() { - final RemoteAccessVpnCfgCommand cmd = new RemoteAccessVpnCfgCommand(true, "124.10.10.10", "10.10.1.1", "10.10.1.10-10.10.1.20", "sharedkey", false); + final RemoteAccessVpnCfgCommand cmd = new RemoteAccessVpnCfgCommand(true, "124.10.10.10", "10.10.1.1", "10.10.1.10-10.10.1.20", "sharedkey", false, null, null, null, null); cmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, ROUTERNAME); cmd.setLocalCidr("10.1.1.1/24"); return cmd; } protected RemoteAccessVpnCfgCommand generateRemoteAccessVpnCfgCommand2() { - final RemoteAccessVpnCfgCommand cmd = new RemoteAccessVpnCfgCommand(false, "124.10.10.10", "10.10.1.1", "10.10.1.10-10.10.1.20", "sharedkey", false); + final RemoteAccessVpnCfgCommand cmd = new RemoteAccessVpnCfgCommand(false, "124.10.10.10", "10.10.1.1", "10.10.1.10-10.10.1.20", "sharedkey", false, null, null, null, null); cmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, ROUTERNAME); cmd.setLocalCidr("10.1.1.1/24"); return cmd; } protected RemoteAccessVpnCfgCommand generateRemoteAccessVpnCfgCommand3() { - final RemoteAccessVpnCfgCommand cmd = new RemoteAccessVpnCfgCommand(true, "124.10.10.10", "10.10.1.1", "10.10.1.10-10.10.1.20", "sharedkey", true); + final RemoteAccessVpnCfgCommand cmd = new RemoteAccessVpnCfgCommand(true, "124.10.10.10", "10.10.1.1", "10.10.1.10-10.10.1.20", "sharedkey", true, null, null, null, null); cmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, ROUTERNAME); cmd.setLocalCidr("10.1.1.1/24"); return cmd; diff --git a/core/src/test/java/com/cloud/network/HAProxyConfiguratorTest.java b/core/src/test/java/com/cloud/network/HAProxyConfiguratorTest.java index 2a282cbeca8b..f6fb740aac51 100644 --- a/core/src/test/java/com/cloud/network/HAProxyConfiguratorTest.java +++ b/core/src/test/java/com/cloud/network/HAProxyConfiguratorTest.java @@ -21,6 +21,9 @@ import static org.junit.Assert.assertTrue; +import java.util.ArrayList; +import java.util.List; + import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; @@ -32,9 +35,6 @@ import com.cloud.agent.api.to.LoadBalancerTO; import com.cloud.network.lb.LoadBalancingRule.LbDestination; -import java.util.List; -import java.util.ArrayList; - /** * @author dhoogland * diff --git a/deps/install-non-oss.sh b/deps/install-non-oss.sh index c6b91e07cec4..af96c33b3349 100755 --- a/deps/install-non-oss.sh +++ b/deps/install-non-oss.sh @@ -24,6 +24,11 @@ mvn install:install-file -Dfile=cloud-iControl.jar -DgroupId=com.cloud.com. # Version: unknown mvn install:install-file -Dfile=cloud-netscaler-sdx.jar -DgroupId=com.cloud.com.citrix -DartifactId=netscaler-sdx -Dversion=1.0 -Dpackaging=jar +# From http://support.netapp.com/ (not available online, contact your support representative) +# Version: 4.0 +if [ -e cloud-manageontap.jar ]; then mv cloud-manageontap.jar manageontap.jar; fi +mvn install:install-file -Dfile=manageontap.jar -DgroupId=com.cloud.com.netapp -DartifactId=manageontap -Dversion=4.0 -Dpackaging=jar + # From https://my.vmware.com/group/vmware/get-download?downloadGroup=VSP510-WEBSDK-510 # Version: 5.1, Release-date: 2012-09-10, Build: 774886 mvn install:install-file -Dfile=vim25_51.jar -DgroupId=com.cloud.com.vmware -DartifactId=vmware-vim25 -Dversion=5.1 -Dpackaging=jar @@ -45,3 +50,6 @@ mvn install:install-file -Dfile=pbm_65.jar -DgroupId=com.cloud.com.vmware -Darti # From https://my.vmware.com/group/vmware/downloads/get-download?downloadGroup=VS-MGMT-SDK67 mvn install:install-file -Dfile=pbm_67.jar -DgroupId=com.cloud.com.vmware -DartifactId=vmware-pbm -Dversion=6.7 -Dpackaging=jar + +# From https://github.com/Juniper/netconf-java/releases +mvn install:install-file -Dfile=Netconf.jar -DgroupId=net.juniper.netconf -DartifactId=netconf-juniper -Dversion=1.0 -Dpackaging=jar diff --git a/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java b/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java index 61489e5f7c89..2cdd2469aed0 100644 --- a/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java @@ -182,6 +182,9 @@ void transferPortableIP(long ipAddrId, long currentNetworkId, long newNetworkId) PublicIp assignDedicateIpAddress(Account owner, Long guestNtwkId, Long vpcId, long dcId, boolean isSourceNat) throws ConcurrentOperationException, InsufficientAddressCapacityException; + PublicIp assignDedicateIpAddress(Account owner, Long guestNtwkId, Long vpcId, long dcId, boolean isSourceNat, String ignoreIp) + throws ConcurrentOperationException, InsufficientAddressCapacityException; + IpAddress allocateIp(Account ipOwner, boolean isSystem, Account caller, long callerId, DataCenter zone, Boolean displayIp, String ipaddress) throws ConcurrentOperationException, ResourceAllocationException, InsufficientAddressCapacityException; diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java index 40592bea86b8..aa58ce359a27 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java @@ -288,7 +288,9 @@ public int registerForInitialConnects(final StartupCommandProcessor creator, fin @Override public void unregisterForHostEvents(final int id) { s_logger.debug("Deregistering " + id); - _hostMonitors.remove(id); + synchronized (_hostMonitors) { + _hostMonitors.remove(id); + } } private AgentControlAnswer handleControlCommand(final AgentAttache attache, final AgentControlCommand cmd) { @@ -537,55 +539,60 @@ public void removeAgent(final AgentAttache attache, final Status nextState) { if (removed != null) { removed.disconnect(nextState); } - - for (final Pair monitor : _hostMonitors) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending Disconnect to listener: " + monitor.second().getClass().getName()); + synchronized (_hostMonitors) { + for (final Pair monitor : _hostMonitors) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Sending Disconnect to listener: " + monitor.second().getClass().getName()); + } + monitor.second().processDisconnect(hostId, nextState); } - monitor.second().processDisconnect(hostId, nextState); } } @Override public void notifyMonitorsOfNewlyAddedHost(long hostId) { - for (final Pair monitor : _hostMonitors) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending host added to listener: " + monitor.second().getClass().getSimpleName()); - } + synchronized (_hostMonitors) { + for (final Pair monitor : _hostMonitors) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Sending host added to listener: " + monitor.second().getClass().getSimpleName()); + } - monitor.second().processHostAdded(hostId); + monitor.second().processHostAdded(hostId); + } } } protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, final StartupCommand[] cmd, final boolean forRebalance) throws ConnectionException { final long hostId = attache.getId(); final HostVO host = _hostDao.findById(hostId); - for (final Pair monitor : _hostMonitors) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending Connect to listener: " + monitor.second().getClass().getSimpleName()); - } - for (int i = 0; i < cmd.length; i++) { - try { - monitor.second().processConnect(host, cmd[i], forRebalance); - } catch (final Exception e) { - if (e instanceof ConnectionException) { - final ConnectionException ce = (ConnectionException)e; - if (ce.isSetupError()) { - s_logger.warn("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage()); - handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); - throw ce; - } else { - s_logger.info("Monitor " + monitor.second().getClass().getSimpleName() + " says not to continue the connect process for " + hostId + " due to " + e.getMessage()); + synchronized (_hostMonitors) { + for (final Pair monitor : _hostMonitors) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Sending Connect to listener: " + monitor.second().getClass().getSimpleName()); + } + for (int i = 0; i < cmd.length; i++) { + try { + monitor.second().processConnect(host, cmd[i], forRebalance); + } catch (final Exception e) { + if (e instanceof ConnectionException) { + final ConnectionException ce = (ConnectionException) e; + if (ce.isSetupError()) { + s_logger.warn("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage()); + handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); + throw ce; + } else { + s_logger.info("Monitor " + monitor.second().getClass().getSimpleName() + " says not to continue the connect process for " + hostId + " due to " + e.getMessage()); + handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); + return attache; + } + } else if (e instanceof HypervisorVersionChangedException) { handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); - return attache; + throw new CloudRuntimeException("Unable to connect " + attache.getId(), e); + } else { + s_logger.error("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage(), e); + handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); + throw new CloudRuntimeException("Unable to connect " + attache.getId(), e); } - } else if (e instanceof HypervisorVersionChangedException) { - handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); - throw new CloudRuntimeException("Unable to connect " + attache.getId(), e); - } else { - s_logger.error("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage(), e); - handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); - throw new CloudRuntimeException("Unable to connect " + attache.getId(), e); } } } @@ -1034,23 +1041,27 @@ public void reconnect(final long hostId) throws AgentUnavailableException { @Override public void notifyMonitorsOfHostAboutToBeRemoved(long hostId) { - for (final Pair monitor : _hostMonitors) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending host about to be removed to listener: " + monitor.second().getClass().getSimpleName()); - } + synchronized (_hostMonitors) { + for (final Pair monitor : _hostMonitors) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Sending host about to be removed to listener: " + monitor.second().getClass().getSimpleName()); + } - monitor.second().processHostAboutToBeRemoved(hostId); + monitor.second().processHostAboutToBeRemoved(hostId); + } } } @Override public void notifyMonitorsOfRemovedHost(long hostId, long clusterId) { - for (final Pair monitor : _hostMonitors) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending host removed to listener: " + monitor.second().getClass().getSimpleName()); - } + synchronized (_hostMonitors) { + for (final Pair monitor : _hostMonitors) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Sending host removed to listener: " + monitor.second().getClass().getSimpleName()); + } - monitor.second().processHostRemoved(hostId, clusterId); + monitor.second().processHostRemoved(hostId, clusterId); + } } } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index a1831a5cacdb..669333bbc7c6 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -121,6 +121,7 @@ import com.cloud.agent.api.UnPlugNicAnswer; import com.cloud.agent.api.UnPlugNicCommand; import com.cloud.agent.api.UnregisterVMCommand; +import com.cloud.agent.api.baremetal.DestroyCommand; import com.cloud.agent.api.routing.NetworkElementCommand; import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.DpdkTO; @@ -486,7 +487,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) throws } String rootVolumeName = String.format("ROOT-%s", vmFinal.getId()); - if (template.getFormat() == ImageFormat.ISO) { + if (template.getFormat() == ImageFormat.ISO || template.getFormat() == ImageFormat.PXEBOOT) { volumeMgr.allocateRawVolume(Type.ROOT, rootVolumeName, rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(), rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), vmFinal, template, owner, null); } else if (template.getFormat() == ImageFormat.BAREMETAL) { @@ -1013,6 +1014,11 @@ public void orchestrateStart(final String vmUuid, final Map() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) throws CloudRuntimeException { @@ -2200,6 +2234,29 @@ private void deleteVMSnapshots(VMInstanceVO vm, boolean expunge) { } } + private VirtualMachineTO toVmTOforBaremetal(VMInstanceVO vm) { + VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); + VirtualMachineTO vmTO = toVmTO(profile); + List nicTOs = new ArrayList(); + + for (NicVO nicVO: _nicsDao.listByVmId(vm.getId())) { + NicTO nicTO = new NicTO(); + nicTO.setMac(nicVO.getMacAddress()); + nicTO.setDefaultNic(nicVO.isDefaultNic()); + nicTO.setBroadcastUri(nicVO.getBroadcastUri()); + Network nw = _networkDao.findById(nicVO.getNetworkId()); + if (nw != null) { + nicTO.setNetworkUuid(nw.getUuid()); + } + + nicTOs.add(nicTO); + } + + vmTO.setNics(nicTOs.toArray(new NicTO[nicTOs.size()])); + + return vmTO; + } + protected boolean checkVmOnHost(final VirtualMachine vm, final long hostId) throws AgentUnavailableException, OperationTimedoutException { final Answer answer = _agentMgr.send(hostId, new CheckVirtualMachineCommand(vm.getInstanceName())); if (answer == null || !answer.getResult()) { diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index bf21e62c6e59..476c0880dd19 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -570,10 +570,9 @@ public VolumeInfo createVolumeFromSnapshot(Volume volume, Snapshot snapshot, Use } protected DiskProfile createDiskCharacteristics(VolumeInfo volumeInfo, VirtualMachineTemplate template, DataCenter dc, DiskOffering diskOffering) { - if (volumeInfo.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO != template.getFormat()) { + if (volumeInfo.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO != template.getFormat() && ImageFormat.PXEBOOT != template.getFormat()) { String templateToString = getReflectOnlySelectedFields(template); String zoneToString = getReflectOnlySelectedFields(dc); - TemplateDataStoreVO ss = _vmTemplateStoreDao.findByTemplateZoneDownloadStatus(template.getId(), dc.getId(), VMTemplateStorageResourceAssoc.Status.DOWNLOADED); if (ss == null) { throw new CloudRuntimeException(String.format("Template [%s] has not been completely downloaded to the zone [%s].", @@ -634,7 +633,8 @@ public VolumeInfo createVolume(VolumeInfo volumeInfo, VirtualMachine vm, Virtual StoragePool pool = null; DiskProfile dskCh = null; - if (volumeInfo.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO != template.getFormat()) { + if (volumeInfo.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO != template.getFormat() && + !ImageFormat.PXEBOOT.equals(template.getFormat())) { dskCh = createDiskCharacteristics(volumeInfo, template, dc, diskOffering); storageMgr.setDiskProfileThrottling(dskCh, offering, diskOffering); } else { diff --git a/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java b/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java index 95e3693a99c5..dfc0d271b91c 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java @@ -69,11 +69,18 @@ public class RemoteAccessVpnVO implements RemoteAccessVpn { @Column(name = "display", updatable = true, nullable = false) protected boolean display = true; + @Column(name = "vpn_type") + private String vpnType; + + @Encrypt + @Column(name = "ca_certificate", length = 8191) + private String caCertificate; + public RemoteAccessVpnVO() { uuid = UUID.randomUUID().toString(); } - public RemoteAccessVpnVO(long accountId, long domainId, Long networkId, long publicIpId, Long vpcId, String localIp, String ipRange, String presharedKey) { + public RemoteAccessVpnVO(long accountId, long domainId, Long networkId, long publicIpId, Long vpcId, String localIp, String ipRange, String presharedKey, String vpnType) { this.accountId = accountId; serverAddressId = publicIpId; this.ipRange = ipRange; @@ -84,6 +91,7 @@ public RemoteAccessVpnVO(long accountId, long domainId, Long networkId, long pub state = State.Added; uuid = UUID.randomUUID().toString(); this.vpcId = vpcId; + this.vpnType = vpnType; } @Override @@ -123,6 +131,15 @@ public void setIpsecPresharedKey(String ipsecPresharedKey) { this.ipsecPresharedKey = ipsecPresharedKey; } + @Override + public String getCaCertificate() { + return caCertificate; + } + + public void setCaCertificate(String caCertificate) { + this.caCertificate = caCertificate; + } + @Override public String getLocalIp() { return localIp; @@ -166,6 +183,15 @@ public boolean isDisplay() { return display; } + public void setVpnType(String vpnType) { + this.vpnType = vpnType; + } + + @Override + public String getVpnType() { + return vpnType; + } + @Override public Class getEntityType() { return RemoteAccessVpn.class; diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java index 83034b3fdbe0..636713f4e705 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java @@ -68,6 +68,9 @@ public class VpcVO implements Vpc { @Column(name = "vpc_offering_id") long vpcOfferingId; + @Column(name ="network_boot_ip") + String networkBootIp; + @Column(name = GenericDao.REMOVED_COLUMN) Date removed; @@ -98,7 +101,7 @@ public VpcVO() { public VpcVO(final long zoneId, final String name, final String displayText, final long accountId, final long domainId, final long vpcOffId, final String cidr, final String networkDomain, final boolean useDistributedRouter, - final boolean regionLevelVpc, final boolean isRedundant) { + final boolean regionLevelVpc, final boolean isRedundant, final String networkBootIp) { this.zoneId = zoneId; this.name = name; this.displayText = displayText; @@ -112,6 +115,7 @@ public VpcVO(final long zoneId, final String name, final String displayText, fin usesDistributedRouter = useDistributedRouter; this.regionLevelVpc = regionLevelVpc; redundant = isRedundant; + this.networkBootIp = networkBootIp; } @Override @@ -167,6 +171,10 @@ public void setVpcOfferingId(final long vpcOfferingId) { this.vpcOfferingId = vpcOfferingId; } + public String getNetworkBootIp() { return networkBootIp; } + + public void setNetworkBootIp(String networkBootIp) { this.networkBootIp = networkBootIp; } + public Date getRemoved() { return removed; } diff --git a/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java b/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java index bfdbba2a6d3b..a9c3c56499d8 100644 --- a/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java @@ -130,6 +130,18 @@ public class DiskOfferingVO implements DiskOffering { private Long iopsWriteRateMaxLength; + @Column(name = "min_iops_per_gb") + Long minIopsPerGb; + + @Column(name = "max_iops_per_gb") + Long maxIopsPerGb; + + @Column(name = "highest_min_iops") + Long highestMinIops; + + @Column(name = "highest_max_iops") + Long highestMaxIops; + @Column(name = "cache_mode", updatable = true, nullable = false) @Enumerated(value = EnumType.STRING) private DiskCacheMode cacheMode; @@ -558,6 +570,46 @@ public void setIopsWriteRateMaxLength(Long iopsWriteRateMaxLength) { this.iopsWriteRateMaxLength = iopsWriteRateMaxLength; } + @Override + public Long getMinIopsPerGb() { + return this.minIopsPerGb; + } + + @Override + public void setMinIopsPerGb(Long minIopsPerGb) { + this.minIopsPerGb = minIopsPerGb; + } + + @Override + public Long getMaxIopsPerGb() { + return maxIopsPerGb; + } + + @Override + public void setMaxIopsPerGb(Long maxIopsPerGb) { + this.maxIopsPerGb = maxIopsPerGb; + } + + @Override + public Long getHighestMinIops() { + return this.highestMinIops; + } + + @Override + public void setHighestMinIops(Long highestMinIops) { + this.highestMinIops = highestMinIops; + } + + @Override + public Long getHighestMaxIops() { + return this.highestMaxIops; + } + + @Override + public void setHighestMaxIops(Long highestMaxIops) { + this.highestMaxIops = highestMaxIops; + } + @Override public void setHypervisorSnapshotReserve(Integer hypervisorSnapshotReserve) { this.hypervisorSnapshotReserve = hypervisorSnapshotReserve; diff --git a/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java b/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java index 8f66da052e90..817a8093128a 100644 --- a/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java @@ -71,6 +71,9 @@ public class VMTemplateVO implements VirtualMachineTemplate { @Column(name = "hvm") private boolean requiresHvm; + @Column(name = "boot_filename") + private String bootFilename; + @Column(name = "bits") private int bits; @@ -227,6 +230,37 @@ public VMTemplateVO(long id, String name, ImageFormat format, boolean isPublic, this.deployAsIs = deployAsIs; } + public VMTemplateVO(long id, String name, ImageFormat format, boolean isPublic, boolean featured, + boolean isExtractable, TemplateType type, String url, boolean requiresHvm, int bits, long accountId, + String cksum, String displayText, boolean enablePassword, long guestOSId, boolean bootable, + HypervisorType hyperType, String templateTag, Map details, boolean sshKeyEnabled, + boolean isDynamicallyScalable, boolean directDownload, boolean deployAsIs, String bootFilename) { + this(id, + name, + format, + isPublic, + featured, + isExtractable, + type, + url, + requiresHvm, + bits, + accountId, + cksum, + displayText, + enablePassword, + guestOSId, + bootable, + hyperType, + templateTag, + details, + sshKeyEnabled, + isDynamicallyScalable, + directDownload, + deployAsIs); + this.bootFilename = bootFilename; + } + public static VMTemplateVO createPreHostIso(Long id, String uniqueName, String name, ImageFormat format, boolean isPublic, boolean featured, TemplateType type, String url, Date created, boolean requiresHvm, int bits, long accountId, String cksum, String displayText, boolean enablePassword, long guestOSId, boolean bootable, HypervisorType hyperType) { @@ -410,6 +444,15 @@ public boolean isRequiresHvm() { return requiresHvm; } + @Override + public String getBootFilename() { + return bootFilename; + } + + public void setBootFilename(String bootFilename) { + this.bootFilename = bootFilename; + } + public void setRequiresHvm(boolean value) { requiresHvm = value; } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSDao.java index 83e19b17e258..fec6f18e05d1 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSDao.java @@ -16,6 +16,8 @@ // under the License. package com.cloud.storage.dao; +import java.util.Map; + import com.cloud.storage.GuestOSVO; import com.cloud.utils.db.GenericDao; @@ -24,4 +26,6 @@ public interface GuestOSDao extends GenericDao { GuestOSVO listByDisplayName(String displayName); GuestOSVO findByCategoryIdAndDisplayNameOrderByCreatedDesc(long categoryId, String displayName); + + Map loadDetails(GuestOSVO guestOS); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSDaoImpl.java index 68da2b92acb5..ff06f95ed2b9 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSDaoImpl.java @@ -16,9 +16,12 @@ // under the License. package com.cloud.storage.dao; - import java.util.List; +import java.util.Map; + +import javax.inject.Inject; +import org.apache.cloudstack.resourcedetail.dao.GuestOsDetailsDao; import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; @@ -31,6 +34,9 @@ @Component public class GuestOSDaoImpl extends GenericDaoBase implements GuestOSDao { + @Inject + private GuestOsDetailsDao _guestOsDetailsDao; + protected final SearchBuilder Search; public GuestOSDaoImpl() { @@ -62,4 +68,8 @@ public GuestOSVO findByCategoryIdAndDisplayNameOrderByCreatedDesc(long categoryI } return null; } + + public Map loadDetails(GuestOSVO guestOS) { + return _guestOsDetailsDao.listDetailsKeyPairs(guestOS.getId()); + } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java index 2d351f52fe4f..24b2861b3a99 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -29,11 +29,6 @@ import javax.inject.Inject; -import com.cloud.upgrade.dao.Upgrade41510to41520; -import com.cloud.upgrade.dao.Upgrade41600to41610; -import com.cloud.upgrade.dao.Upgrade41610to41700; -import com.cloud.upgrade.dao.Upgrade41700to41710; -import com.cloud.upgrade.dao.Upgrade41710to41800; import org.apache.cloudstack.utils.CloudStackVersion; import org.apache.commons.lang3.StringUtils; import org.apache.log4j.Logger; @@ -65,18 +60,51 @@ import com.cloud.upgrade.dao.Upgrade307to410; import com.cloud.upgrade.dao.Upgrade30to301; import com.cloud.upgrade.dao.Upgrade40to41; -import com.cloud.upgrade.dao.Upgrade41000to41100; +import com.cloud.upgrade.dao.Upgrade41000to4100226; +import com.cloud.upgrade.dao.Upgrade4100226to4100227; +import com.cloud.upgrade.dao.Upgrade4100227to4100228; +import com.cloud.upgrade.dao.Upgrade4100228to4100229; +import com.cloud.upgrade.dao.Upgrade4100229to4100230; +import com.cloud.upgrade.dao.Upgrade4100230to4100231; +import com.cloud.upgrade.dao.Upgrade4100231to4100232; +import com.cloud.upgrade.dao.Upgrade4100232to4100233; +import com.cloud.upgrade.dao.Upgrade4100233to4100234; +import com.cloud.upgrade.dao.Upgrade4100234to4100235; +import com.cloud.upgrade.dao.Upgrade4100235to4100236; +import com.cloud.upgrade.dao.Upgrade4100236to4100237; +import com.cloud.upgrade.dao.Upgrade4100237to4100238; +import com.cloud.upgrade.dao.Upgrade4100238to4100239; +import com.cloud.upgrade.dao.Upgrade4100239to4100240; +import com.cloud.upgrade.dao.Upgrade4100240to41100; import com.cloud.upgrade.dao.Upgrade410to420; import com.cloud.upgrade.dao.Upgrade41100to41110; import com.cloud.upgrade.dao.Upgrade41110to41120; import com.cloud.upgrade.dao.Upgrade41120to41130; -import com.cloud.upgrade.dao.Upgrade41120to41200; -import com.cloud.upgrade.dao.Upgrade41200to41300; +import com.cloud.upgrade.dao.Upgrade41130to41200; +import com.cloud.upgrade.dao.Upgrade41200to41201; +import com.cloud.upgrade.dao.Upgrade41201to41202; +import com.cloud.upgrade.dao.Upgrade41202to41203; +import com.cloud.upgrade.dao.Upgrade41203to41204; +import com.cloud.upgrade.dao.Upgrade41204to41205; +import com.cloud.upgrade.dao.Upgrade41205to41206; +import com.cloud.upgrade.dao.Upgrade41206to41207; +import com.cloud.upgrade.dao.Upgrade41207to41208; +import com.cloud.upgrade.dao.Upgrade41208to41209; +import com.cloud.upgrade.dao.Upgrade41209to412010; +import com.cloud.upgrade.dao.Upgrade41210to412011; +import com.cloud.upgrade.dao.Upgrade41211to412012; +import com.cloud.upgrade.dao.Upgrade41212to412013; +import com.cloud.upgrade.dao.Upgrade412025to41300; import com.cloud.upgrade.dao.Upgrade41300to41310; import com.cloud.upgrade.dao.Upgrade41310to41400; import com.cloud.upgrade.dao.Upgrade41400to41500; import com.cloud.upgrade.dao.Upgrade41500to41510; +import com.cloud.upgrade.dao.Upgrade41510to41520; import com.cloud.upgrade.dao.Upgrade41520to41600; +import com.cloud.upgrade.dao.Upgrade41600to41610; +import com.cloud.upgrade.dao.Upgrade41610to41700; +import com.cloud.upgrade.dao.Upgrade41700to41710; +import com.cloud.upgrade.dao.Upgrade41710to41800; import com.cloud.upgrade.dao.Upgrade420to421; import com.cloud.upgrade.dao.Upgrade421to430; import com.cloud.upgrade.dao.Upgrade430to440; @@ -192,12 +220,40 @@ public DatabaseUpgradeChecker() { .next("4.9.2.0" , new Upgrade4920to4930()) .next("4.9.3.0" , new Upgrade4930to41000()) .next("4.9.3.1" , new Upgrade4930to41000()) - .next("4.10.0.0", new Upgrade41000to41100()) + .next("4.10.0.0", new Upgrade41000to4100226()) + .next("4.10.0.226", new Upgrade4100226to4100227()) + .next("4.10.0.227", new Upgrade4100227to4100228()) + .next("4.10.0.228", new Upgrade4100228to4100229()) + .next("4.10.0.229", new Upgrade4100229to4100230()) + .next("4.10.0.230", new Upgrade4100230to4100231()) + .next("4.10.0.231", new Upgrade4100231to4100232()) + .next("4.10.0.232", new Upgrade4100232to4100233()) + .next("4.10.0.233", new Upgrade4100233to4100234()) + .next("4.10.0.234", new Upgrade4100234to4100235()) + .next("4.10.0.235", new Upgrade4100235to4100236()) + .next("4.10.0.236", new Upgrade4100236to4100237()) + .next("4.10.0.237", new Upgrade4100237to4100238()) + .next("4.10.0.238", new Upgrade4100238to4100239()) + .next("4.10.0.239", new Upgrade4100239to4100240()) + .next("4.10.0.240", new Upgrade4100240to41100()) .next("4.11.0.0", new Upgrade41100to41110()) .next("4.11.1.0", new Upgrade41110to41120()) .next("4.11.2.0", new Upgrade41120to41130()) - .next("4.11.3.0", new Upgrade41120to41200()) - .next("4.12.0.0", new Upgrade41200to41300()) + .next("4.11.3.0", new Upgrade41130to41200()) + .next("4.12.0.0", new Upgrade41200to41201()) + .next("4.12.0.1", new Upgrade41201to41202()) + .next("4.12.0.2", new Upgrade41202to41203()) + .next("4.12.0.3", new Upgrade41203to41204()) + .next("4.12.0.4", new Upgrade41204to41205()) + .next("4.12.0.5", new Upgrade41205to41206()) + .next("4.12.0.6", new Upgrade41206to41207()) + .next("4.12.0.7", new Upgrade41207to41208()) + .next("4.12.0.8", new Upgrade41208to41209()) + .next("4.12.0.9", new Upgrade41209to412010()) + .next("4.12.0.10", new Upgrade41210to412011()) + .next("4.12.0.11", new Upgrade41211to412012()) + .next("4.12.0.12", new Upgrade41212to412013()) + .next("4.12.0.25", new Upgrade412025to41300()) .next("4.13.0.0", new Upgrade41300to41310()) .next("4.13.1.0", new Upgrade41310to41400()) .next("4.14.0.0", new Upgrade41400to41500()) @@ -286,7 +342,7 @@ protected void upgrade(CloudStackVersion dbVersion, CloudStackVersion currentVer for (DbUpgrade upgrade : upgrades) { VersionVO version; s_logger.debug("Running upgrade " + upgrade.getClass().getSimpleName() + " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade - .getUpgradableVersionRange()[1] + " to " + upgrade.getUpgradedVersion()); + .getUpgradableVersionRange()[1] + " to " + upgrade.getUpgradedVersion()); TransactionLegacy txn = TransactionLegacy.open("Upgrade"); txn.start(); try { @@ -323,7 +379,7 @@ protected void upgrade(CloudStackVersion dbVersion, CloudStackVersion currentVer txn = TransactionLegacy.open("Cleanup"); try { s_logger.info("Cleanup upgrade " + upgrade.getClass().getSimpleName() + " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade - .getUpgradableVersionRange()[1] + " to " + upgrade.getUpgradedVersion()); + .getUpgradableVersionRange()[1] + " to " + upgrade.getUpgradedVersion()); txn.start(); Connection conn; diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41000to4100226.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41000to4100226.java new file mode 100644 index 000000000000..819ffd3e7374 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41000to4100226.java @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade41000to4100226 implements DbUpgrade { + + final static Logger LOG = Logger.getLogger(Upgrade41000to4100226.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.10.0.0", "4.10.0.226"}; + } + + @Override + public String getUpgradedVersion() { + return "4.10.0.226"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-41000to4100226.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-41000to4100226-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100226to4100227.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100226to4100227.java new file mode 100644 index 000000000000..8d189f3a5e78 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100226to4100227.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade4100226to4100227 implements DbUpgrade { + final static Logger LOG = Logger.getLogger(Upgrade4100226to4100227.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.10.0.226", "4.10.0.227"}; + } + + @Override + public String getUpgradedVersion() { + return "4.10.0.227"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-4100226to4100227.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-4100226to4100227-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100227to4100228.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100227to4100228.java new file mode 100644 index 000000000000..ca5039aef949 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100227to4100228.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade4100227to4100228 implements DbUpgrade { + final static Logger LOG = Logger.getLogger(Upgrade4100227to4100228.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.10.0.227", "4.10.0.228"}; + } + + @Override + public String getUpgradedVersion() { + return "4.10.0.228"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-4100227to4100228.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-4100227to4100228-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100228to4100229.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100228to4100229.java new file mode 100644 index 000000000000..8fd923cf0b75 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100228to4100229.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade4100228to4100229 implements DbUpgrade { + final static Logger LOG = Logger.getLogger(Upgrade4100228to4100229.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.10.0.228", "4.10.0.229"}; + } + + @Override + public String getUpgradedVersion() { + return "4.10.0.229"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-4100228to4100229.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-4100228to4100229-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100229to4100230.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100229to4100230.java new file mode 100644 index 000000000000..41a9d5f9cb34 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100229to4100230.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade4100229to4100230 implements DbUpgrade { + final static Logger LOG = Logger.getLogger(Upgrade4100229to4100230.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.10.0.229", "4.10.0.230"}; + } + + @Override + public String getUpgradedVersion() { + return "4.10.0.230"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-4100229to4100230.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-4100229to4100230-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100230to4100231.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100230to4100231.java new file mode 100644 index 000000000000..411c57ab5ebf --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100230to4100231.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade4100230to4100231 implements DbUpgrade { + final static Logger LOG = Logger.getLogger(Upgrade4100230to4100231.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.10.0.230", "4.10.0.231"}; + } + + @Override + public String getUpgradedVersion() { + return "4.10.0.231"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-4100230to4100231.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-4100230to4100231-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100231to4100232.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100231to4100232.java new file mode 100644 index 000000000000..960537b579d2 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100231to4100232.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade4100231to4100232 implements DbUpgrade { + final static Logger LOG = Logger.getLogger(Upgrade4100231to4100232.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.10.0.231", "4.10.0.232"}; + } + + @Override + public String getUpgradedVersion() { + return "4.10.0.232"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-4100231to4100232.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-4100231to4100232-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100232to4100233.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100232to4100233.java new file mode 100644 index 000000000000..7e4c7b755e99 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100232to4100233.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade4100232to4100233 implements DbUpgrade { + final static Logger LOG = Logger.getLogger(Upgrade4100232to4100233.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.10.0.232", "4.10.0.233"}; + } + + @Override + public String getUpgradedVersion() { + return "4.10.0.233"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-4100232to4100233.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-4100232to4100233-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100233to4100234.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100233to4100234.java new file mode 100644 index 000000000000..be595c68fda6 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100233to4100234.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade4100233to4100234 implements DbUpgrade { + final static Logger LOG = Logger.getLogger(Upgrade4100233to4100234.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.10.0.233", "4.10.0.234"}; + } + + @Override + public String getUpgradedVersion() { + return "4.10.0.234"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-4100233to4100234.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-4100233to4100234-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100234to4100235.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100234to4100235.java new file mode 100644 index 000000000000..19d5b936088e --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100234to4100235.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade4100234to4100235 implements DbUpgrade { + final static Logger LOG = Logger.getLogger(Upgrade4100234to4100235.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.10.0.234", "4.10.0.235"}; + } + + @Override + public String getUpgradedVersion() { + return "4.10.0.235"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-4100234to4100235.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-4100234to4100235-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100235to4100236.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100235to4100236.java new file mode 100644 index 000000000000..3469c6397bbb --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100235to4100236.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade4100235to4100236 implements DbUpgrade { + final static Logger LOG = Logger.getLogger(Upgrade4100235to4100236.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.10.0.235", "4.10.0.236"}; + } + + @Override + public String getUpgradedVersion() { + return "4.10.0.236"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-4100235to4100236.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-4100235to4100236-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100236to4100237.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100236to4100237.java new file mode 100644 index 000000000000..dca9ec9fa554 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100236to4100237.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade4100236to4100237 implements DbUpgrade { + final static Logger LOG = Logger.getLogger(Upgrade4100236to4100237.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.10.0.236", "4.10.0.237"}; + } + + @Override + public String getUpgradedVersion() { + return "4.10.0.237"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-4100236to4100237.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-4100236to4100237-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100237to4100238.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100237to4100238.java new file mode 100644 index 000000000000..4684a3b26820 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100237to4100238.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade4100237to4100238 implements DbUpgrade { + final static Logger LOG = Logger.getLogger(Upgrade4100237to4100238.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.10.0.237", "4.10.0.238"}; + } + + @Override + public String getUpgradedVersion() { + return "4.10.0.238"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-4100237to4100238.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-4100237to4100238-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100238to4100239.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100238to4100239.java new file mode 100644 index 000000000000..9c82fbc06988 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100238to4100239.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade4100238to4100239 implements DbUpgrade { + final static Logger LOG = Logger.getLogger(Upgrade4100238to4100239.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.10.0.238", "4.10.0.239"}; + } + + @Override + public String getUpgradedVersion() { + return "4.10.0.239"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-4100238to4100239.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-4100238to4100239-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100239to4100240.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100239to4100240.java new file mode 100644 index 000000000000..467414514282 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100239to4100240.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade4100239to4100240 implements DbUpgrade { + final static Logger LOG = Logger.getLogger(Upgrade4100239to4100240.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.10.0.239", "4.10.0.240"}; + } + + @Override + public String getUpgradedVersion() { + return "4.10.0.240"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-4100239to4100240.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-4100239to4100240-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41000to41100.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100240to41100.java similarity index 94% rename from engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41000to41100.java rename to engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100240to41100.java index 3900cf0bf82d..457a36e7c9ee 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41000to41100.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4100240to41100.java @@ -14,7 +14,6 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. - package com.cloud.upgrade.dao; import java.io.InputStream; @@ -30,13 +29,12 @@ import com.cloud.utils.PropertiesUtil; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade41000to41100 implements DbUpgrade { - - final static Logger LOG = Logger.getLogger(Upgrade41000to41100.class); +public class Upgrade4100240to41100 implements DbUpgrade { + final static Logger LOG = Logger.getLogger(Upgrade4100240to41100.class); @Override public String[] getUpgradableVersionRange() { - return new String[] {"4.10.0.0", "4.11.0.0"}; + return new String[] {"4.10.0.240", "4.11.0.0"}; } @Override @@ -51,7 +49,7 @@ public boolean supportsRollingUpgrade() { @Override public InputStream[] getPrepareScripts() { - final String scriptFile = "META-INF/db/schema-41000to41100.sql"; + final String scriptFile = "META-INF/db/schema-4100240to41100.sql"; final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); if (script == null) { throw new CloudRuntimeException("Unable to find " + scriptFile); @@ -66,6 +64,17 @@ public void performDataMigration(Connection conn) { validateUserDataInBase64(conn); } + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-4100240to41100-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + private void checkAndEnableDynamicRoles(final Connection conn) { final Map apiMap = PropertiesUtil.processConfigFile(new String[] { "commands.properties" }); if (apiMap == null || apiMap.isEmpty()) { @@ -120,15 +129,4 @@ private void validateUserDataInBase64(Connection conn) { LOG.debug("Done validating base64 content of user data"); } } - - @Override - public InputStream[] getCleanupScripts() { - final String scriptFile = "META-INF/db/schema-41000to41100-cleanup.sql"; - final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); - if (script == null) { - throw new CloudRuntimeException("Unable to find " + scriptFile); - } - - return new InputStream[] {script}; - } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41100to41110.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41100to41110.java index 3703040771bd..4ad08409a0c8 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41100to41110.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41100to41110.java @@ -31,7 +31,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class Upgrade41100to41110 implements DbUpgrade { - final static Logger LOG = Logger.getLogger(Upgrade41000to41100.class); + final static Logger LOG = Logger.getLogger(Upgrade41100to41110.class); @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41120to41200.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41130to41200.java similarity index 93% rename from engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41120to41200.java rename to engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41130to41200.java index f68f04a53aa8..7967c33cbd75 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41120to41200.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41130to41200.java @@ -22,16 +22,17 @@ import java.sql.PreparedStatement; import java.sql.SQLException; -import com.cloud.utils.exception.CloudRuntimeException; import org.apache.log4j.Logger; -public class Upgrade41120to41200 implements DbUpgrade { +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade41130to41200 implements DbUpgrade { - final static Logger LOG = Logger.getLogger(Upgrade41120to41200.class); + final static Logger LOG = Logger.getLogger(Upgrade41130to41200.class); @Override public String[] getUpgradableVersionRange() { - return new String[] {"4.11.2.0", "4.12.0.0"}; + return new String[] {"4.11.3.0", "4.12.0.0"}; } @Override diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41200to41201.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41200to41201.java new file mode 100644 index 000000000000..a53d101e484c --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41200to41201.java @@ -0,0 +1,133 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.util.HashMap; + +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.log4j.Logger; + +public class Upgrade41200to41201 implements DbUpgrade { + + final static Logger LOG = Logger.getLogger(Upgrade41200to41201.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.12.0.0", "4.12.0.1"}; + } + + @Override + public String getUpgradedVersion() { + return "4.12.0.1"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-41200to41201.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + populateGuestOsDetails(conn); + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-41200to41201-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + private void populateGuestOsDetails(Connection conn){ + final HashMap xenServerGuestOsMemoryMap = new HashMap(70); + + xenServerGuestOsMemoryMap.put("Ubuntu 18.04 (32-bit)", new MemoryValues(512l, 32 * 1024l)); + xenServerGuestOsMemoryMap.put("Ubuntu 18.04 (64-bit)", new MemoryValues(512l, 128 * 1024l)); + xenServerGuestOsMemoryMap.put("Ubuntu 18.10 (32-bit)", new MemoryValues(512l, 32 * 1024l)); + xenServerGuestOsMemoryMap.put("Ubuntu 18.10 (64-bit)", new MemoryValues(512l, 128 * 1024l)); + xenServerGuestOsMemoryMap.put("Ubuntu 19.04 (32-bit)", new MemoryValues(512l, 32 * 1024l)); + xenServerGuestOsMemoryMap.put("Ubuntu 19.04 (64-bit)", new MemoryValues(512l, 128 * 1024l)); + + final String insertDynamicMemoryVal = "insert into guest_os_details(guest_os_id, name, value, display) select id,?, ?, 0 from guest_os where display_name = ?"; + + PreparedStatement ps = null; + + try { + ps = conn.prepareStatement(insertDynamicMemoryVal); + + for (String key: xenServerGuestOsMemoryMap.keySet()){ + ps.setString(1,"xenserver.dynamicMin"); + ps.setString(2,String.valueOf(xenServerGuestOsMemoryMap.get(key).getMin())); + ps.setString(3, key); + ps.executeUpdate(); + + ps.setString(1,"xenserver.dynamicMax"); + ps.setString(2,String.valueOf(xenServerGuestOsMemoryMap.get(key).getMax())); + ps.setString(3, key); + ps.executeUpdate(); + } + } catch(SQLException e) { + throw new CloudRuntimeException("Unable to update guestOs details", e); + } finally { + try { + if (ps != null && !ps.isClosed()) { + ps.close(); + } + } catch (SQLException e) { + } + } + } + + private static class MemoryValues { + long max; + long min; + + public MemoryValues(final long min, final long max) { + this.min = min * 1024 * 1024; + this.max = max * 1024 * 1024; + } + + public long getMax() { + return max; + } + + public long getMin() { + return min; + } + } + +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41201to41202.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41201to41202.java new file mode 100644 index 000000000000..78240b32e916 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41201to41202.java @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.log4j.Logger; + +public class Upgrade41201to41202 implements DbUpgrade { + + final static Logger LOG = Logger.getLogger(Upgrade41201to41202.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.12.0.1", "4.12.0.2"}; + } + + @Override + public String getUpgradedVersion() { + return "4.12.0.2"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-41201to41202.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-41201to41202-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41200to41300.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade412025to41300.java similarity index 87% rename from engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41200to41300.java rename to engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade412025to41300.java index 2de8dc983587..ff5028227772 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41200to41300.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade412025to41300.java @@ -22,11 +22,11 @@ import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade41200to41300 implements DbUpgrade { +public class Upgrade412025to41300 implements DbUpgrade { @Override public String[] getUpgradableVersionRange() { - return new String[] {"4.12.0.0", "4.13.0.0"}; + return new String[] {"4.12.0.25", "4.13.0.0"}; } @Override @@ -41,7 +41,7 @@ public boolean supportsRollingUpgrade() { @Override public InputStream[] getPrepareScripts() { - final String scriptFile = "META-INF/db/schema-41200to41300.sql"; + final String scriptFile = "META-INF/db/schema-412025to41300.sql"; final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); if (script == null) { throw new CloudRuntimeException("Unable to find " + scriptFile); @@ -56,7 +56,7 @@ public void performDataMigration(Connection conn) { @Override public InputStream[] getCleanupScripts() { - final String scriptFile = "META-INF/db/schema-41200to41300-cleanup.sql"; + final String scriptFile = "META-INF/db/schema-412025to41300-cleanup.sql"; final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); if (script == null) { throw new CloudRuntimeException("Unable to find " + scriptFile); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41202to41203.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41202to41203.java new file mode 100644 index 000000000000..a67751999ca0 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41202to41203.java @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.log4j.Logger; + +public class Upgrade41202to41203 implements DbUpgrade { + + final static Logger LOG = Logger.getLogger(Upgrade41202to41203.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.12.0.2", "4.12.0.3"}; + } + + @Override + public String getUpgradedVersion() { + return "4.12.0.3"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-41202to41203.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-41202to41203-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41203to41204.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41203to41204.java new file mode 100644 index 000000000000..b1933f05a426 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41203to41204.java @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.log4j.Logger; + +public class Upgrade41203to41204 implements DbUpgrade { + + final static Logger LOG = Logger.getLogger(Upgrade41203to41204.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.12.0.3", "4.12.0.4"}; + } + + @Override + public String getUpgradedVersion() { + return "4.12.0.4"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-41203to41204.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-41203to41204-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41204to41205.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41204to41205.java new file mode 100644 index 000000000000..75aaa66ad69c --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41204to41205.java @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.log4j.Logger; + +public class Upgrade41204to41205 implements DbUpgrade { + + final static Logger LOG = Logger.getLogger(Upgrade41204to41205.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.12.0.4", "4.12.0.5"}; + } + + @Override + public String getUpgradedVersion() { + return "4.12.0.5"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-41204to41205.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-41204to41205-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41205to41206.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41205to41206.java new file mode 100644 index 000000000000..c00ea4f0e849 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41205to41206.java @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.log4j.Logger; + +public class Upgrade41205to41206 implements DbUpgrade { + + final static Logger LOG = Logger.getLogger(Upgrade41205to41206.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.12.0.5", "4.12.0.6"}; + } + + @Override + public String getUpgradedVersion() { + return "4.12.0.6"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-41205to41206.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-41205to41206-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41206to41207.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41206to41207.java new file mode 100644 index 000000000000..34ffe6c16100 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41206to41207.java @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.upgrade.dao; + +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.log4j.Logger; + +import java.io.InputStream; +import java.sql.Connection; + +public class Upgrade41206to41207 implements DbUpgrade { + + final static Logger LOG = Logger.getLogger(Upgrade41206to41207.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.12.0.6", "4.12.0.7"}; + } + + @Override + public String getUpgradedVersion() { + return "4.12.0.7"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-41206to41207.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-41206to41207-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41207to41208.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41207to41208.java new file mode 100644 index 000000000000..13372dfa1b92 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41207to41208.java @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.upgrade.dao; + +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.log4j.Logger; + +import java.io.InputStream; +import java.sql.Connection; + +public class Upgrade41207to41208 implements DbUpgrade { + + final static Logger LOG = Logger.getLogger(Upgrade41207to41208.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.12.0.7", "4.12.0.8"}; + } + + @Override + public String getUpgradedVersion() { + return "4.12.0.8"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-41207to41208.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-41207to41208-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41208to41209.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41208to41209.java new file mode 100644 index 000000000000..f4daaa4a13c4 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41208to41209.java @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.upgrade.dao; + +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.log4j.Logger; + +import java.io.InputStream; +import java.sql.Connection; + +public class Upgrade41208to41209 implements DbUpgrade { + + final static Logger LOG = Logger.getLogger(Upgrade41208to41209.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.12.0.8", "4.12.0.9"}; + } + + @Override + public String getUpgradedVersion() { + return "4.12.0.9"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-41208to41209.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-41208to41209-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41209to412010.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41209to412010.java new file mode 100644 index 000000000000..3336a7b88a18 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41209to412010.java @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade41209to412010 implements DbUpgrade { + + final static Logger LOG = Logger.getLogger(Upgrade41209to412010.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.12.0.9", "4.12.0.10"}; + } + + @Override + public String getUpgradedVersion() { + return "4.12.0.10"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-41209to412010.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-41209to412010-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41210to412011.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41210to412011.java new file mode 100644 index 000000000000..66eca27847f8 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41210to412011.java @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.upgrade.dao; + +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.log4j.Logger; + +import java.io.InputStream; +import java.sql.Connection; + +public class Upgrade41210to412011 implements DbUpgrade { + + final static Logger LOG = Logger.getLogger(Upgrade41210to412011.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.12.0.10", "4.12.0.11"}; + } + + @Override + public String getUpgradedVersion() { + return "4.12.0.11"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-41210to412011.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-41210to412011-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41211to412012.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41211to412012.java new file mode 100644 index 000000000000..dafffa19c4a9 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41211to412012.java @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade41211to412012 implements DbUpgrade { + + final static Logger LOG = Logger.getLogger(Upgrade41211to412012.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.12.0.11", "4.12.0.12"}; + } + + @Override + public String getUpgradedVersion() { + return "4.12.0.12"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-41211to412012.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-41211to412012-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41212to412013.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41212to412013.java new file mode 100644 index 000000000000..b86c619ab7a5 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41212to412013.java @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade41212to412013 implements DbUpgrade { + + final static Logger LOG = Logger.getLogger(Upgrade41212to412013.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.12.0.12", "4.12.0.13"}; + } + + @Override + public String getUpgradedVersion() { + return "4.12.0.13"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-41212to412013.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + final String scriptFile = "META-INF/db/schema-41212to412013-cleanup.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/RemoteAccessVpnDetailVO.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/RemoteAccessVpnDetailVO.java index 5fb01a25c2a9..86e50a38787e 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/RemoteAccessVpnDetailVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/RemoteAccessVpnDetailVO.java @@ -39,7 +39,7 @@ public class RemoteAccessVpnDetailVO implements ResourceDetail { @Column(name = "name") private String name; - @Column(name = "value", length = 1024) + @Column(name = "value", length = 8191) private String value; @Column(name = "display") diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/RemoteAccessVpnDetailsDao.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/RemoteAccessVpnDetailsDao.java index 297b7f614c12..d0fdbef84171 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/RemoteAccessVpnDetailsDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/RemoteAccessVpnDetailsDao.java @@ -16,11 +16,13 @@ // under the License. package org.apache.cloudstack.resourcedetail.dao; +import java.util.Map; + import org.apache.cloudstack.resourcedetail.RemoteAccessVpnDetailVO; import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; import com.cloud.utils.db.GenericDao; public interface RemoteAccessVpnDetailsDao extends GenericDao, ResourceDetailsDao { - + Map getDetails(long vpnId); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/RemoteAccessVpnDetailsDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/RemoteAccessVpnDetailsDaoImpl.java index a71b006254e5..7fe1e08a7d24 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/RemoteAccessVpnDetailsDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/dao/RemoteAccessVpnDetailsDaoImpl.java @@ -16,17 +16,43 @@ // under the License. package org.apache.cloudstack.resourcedetail.dao; +import java.util.Map; +import java.util.stream.Collectors; import org.springframework.stereotype.Component; +import com.cloud.utils.crypt.DBEncryptionUtil; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + import org.apache.cloudstack.resourcedetail.RemoteAccessVpnDetailVO; import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; @Component public class RemoteAccessVpnDetailsDaoImpl extends ResourceDetailsDaoBase implements RemoteAccessVpnDetailsDao { + protected final SearchBuilder vpnSearch; + + public RemoteAccessVpnDetailsDaoImpl() { + super(); + + vpnSearch = createSearchBuilder(); + vpnSearch.and("remote_access_vpn", vpnSearch.entity().getResourceId(), SearchCriteria.Op.EQ); + vpnSearch.done(); + } + @Override public void addDetail(long resourceId, String key, String value, boolean display) { - super.addDetail(new RemoteAccessVpnDetailVO(resourceId, key, value, display)); + super.addDetail(new RemoteAccessVpnDetailVO(resourceId, key, DBEncryptionUtil.encrypt(value), display)); + } + + @Override + public Map getDetails(long vpnId) { + SearchCriteria sc = vpnSearch.create(); + sc.setParameters("remote_access_vpn", vpnId); + + return listBy(sc).stream().collect(Collectors.toMap(RemoteAccessVpnDetailVO::getName, detail -> { + return DBEncryptionUtil.decrypt(detail.getValue()); + })); } } diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41000to4100226-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41000to4100226-cleanup.sql new file mode 100644 index 000000000000..60c94b7bdf3c --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41000to4100226-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.10.0.0 to 4.10.0.226 +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41000to4100226.sql b/engine/schema/src/main/resources/META-INF/db/schema-41000to4100226.sql new file mode 100644 index 000000000000..a9af8d3cd03c --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41000to4100226.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.10.0.0 to 4.10.0.226 +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100226to4100227-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100226to4100227-cleanup.sql new file mode 100644 index 000000000000..d0e8990dc970 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100226to4100227-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.10.0.226 to 4.10.0.227; +--; \ No newline at end of file diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100226to4100227.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100226to4100227.sql new file mode 100644 index 000000000000..c5be382e2a70 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100226to4100227.sql @@ -0,0 +1,67 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.10.0.226 to 4.10.0.227; +--; + +-- VDI-per-LUN +ALTER TABLE `cloud`.`disk_offering` ADD COLUMN `min_iops_per_gb` int unsigned DEFAULT NULL COMMENT 'Min IOPS per GB'; +ALTER TABLE `cloud`.`disk_offering` ADD COLUMN `max_iops_per_gb` int unsigned DEFAULT NULL COMMENT 'Max IOPS per GB'; +ALTER TABLE `cloud`.`disk_offering` ADD COLUMN `highest_min_iops` int unsigned DEFAULT NULL COMMENT 'Highest Min IOPS for the offering'; +ALTER TABLE `cloud`.`disk_offering` ADD COLUMN `highest_max_iops` int unsigned DEFAULT NULL COMMENT 'Highest Max IOPS for the offering'; +DROP VIEW IF EXISTS `cloud`.`disk_offering_view`; +CREATE VIEW `cloud`.`disk_offering_view` AS + select + disk_offering.id, + disk_offering.uuid, + disk_offering.name, + disk_offering.display_text, + disk_offering.provisioning_type, + disk_offering.disk_size, + disk_offering.min_iops, + disk_offering.max_iops, + disk_offering.created, + disk_offering.tags, + disk_offering.customized, + disk_offering.customized_iops, + disk_offering.removed, + disk_offering.use_local_storage, + disk_offering.system_use, + disk_offering.hv_ss_reserve, + disk_offering.bytes_read_rate, + disk_offering.bytes_write_rate, + disk_offering.iops_read_rate, + disk_offering.iops_write_rate, + disk_offering.min_iops_per_gb, + disk_offering.max_iops_per_gb, + disk_offering.highest_min_iops, + disk_offering.highest_max_iops, + disk_offering.cache_mode, + disk_offering.sort_key, + disk_offering.type, + disk_offering.display_offering, + domain.id domain_id, + domain.uuid domain_uuid, + domain.name domain_name, + domain.path domain_path + from + `cloud`.`disk_offering` + left join + `cloud`.`domain` ON disk_offering.domain_id = domain.id + where + disk_offering.state='ACTIVE'; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100227to4100228-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100227to4100228-cleanup.sql new file mode 100644 index 000000000000..f7b189eab586 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100227to4100228-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.10.0.227 to 4.10.0.228; +--; \ No newline at end of file diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100227to4100228.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100227to4100228.sql new file mode 100644 index 000000000000..075c70d10b14 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100227to4100228.sql @@ -0,0 +1,227 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.10.0.227 to 4.10.0.228; +--; + +-- VPN implementation based on IKEv2 +ALTER TABLE `cloud`.`remote_access_vpn` CHANGE COLUMN `ipsec_psk` `ipsec_psk` VARCHAR(256) NULL ; +ALTER TABLE `cloud`.`remote_access_vpn` + ADD COLUMN `vpn_type` VARCHAR(8) NOT NULL AFTER `display`, + ADD COLUMN `ca_certificate` VARCHAR(8191) NULL AFTER `vpn_type`; + +ALTER TABLE `cloud`.`remote_access_vpn_details` CHANGE COLUMN `value` `value` VARCHAR(8191) NOT NULL ; + +-- XenServer 7.1.1 support update +INSERT INTO `cloud`.`hypervisor_capabilities`( + uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, storage_motion_supported) +values + (UUID(), 'XenServer', '7.1.1', 500, 13, 1); + +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 4.5 (32-bit)', 1, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 4.6 (32-bit)', 2, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 4.7 (32-bit)', 3, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 4.8 (32-bit)', 4, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 5, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 6, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 7, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 8, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 9, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 10, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 11, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 12, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 13, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 14, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 111, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 112, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 141, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 142, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 161, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 162, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 173, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 174, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 175, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 176, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 231, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 232, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (32-bit)', 139, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 5 (64-bit)', 140, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (32-bit)', 143, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (64-bit)', 144, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (32-bit)', 177, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (64-bit)', 178, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (32-bit)', 179, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (64-bit)', 180, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (32-bit)', 171, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (64-bit)', 172, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (32-bit)', 181, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (64-bit)', 182, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (32-bit)', 227, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (64-bit)', 228, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (32-bit)', 248, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (64-bit)', 249, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 7', 246, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Debian Squeeze 6.0 (32-bit)', 132, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Debian Squeeze 6.0 (64-bit)', 133, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Debian Wheezy 7.0 (32-bit)', 183, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Debian Wheezy 7.0 (64-bit)', 184, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 16, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 17, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 18, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 19, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 20, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 21, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 22, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 23, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 24, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 25, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 134, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 135, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 145, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 146, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 207, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 208, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 209, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 210, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 211, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 212, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (32-bit)', 233, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 5 (64-bit)', 234, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (32-bit)', 147, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (64-bit)', 148, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (32-bit)', 213, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (64-bit)', 214, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (32-bit)', 215, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (64-bit)', 216, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (32-bit)', 217, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (64-bit)', 218, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (32-bit)', 219, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (64-bit)', 220, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (32-bit)', 235, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (64-bit)', 236, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (32-bit)', 250, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Enterprise Linux 6 (64-bit)', 251, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Oracle Linux 7', 247, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 4.5 (32-bit)', 26, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 4.6 (32-bit)', 27, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 4.7 (32-bit)', 28, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 4.8 (32-bit)', 29, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 30, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 31, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 32, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 33, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 34, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 35, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 36, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 37, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 38, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 39, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 113, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 114, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 149, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 150, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 189, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 190, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 191, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 192, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 193, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 194, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (32-bit)', 237, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 5 (64-bit)', 238, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (32-bit)', 136, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (64-bit)', 137, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (32-bit)', 195, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (64-bit)', 196, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (32-bit)', 197, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (64-bit)', 198, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (32-bit)', 199, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (64-bit)', 204, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (32-bit)', 205, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (64-bit)', 206, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (32-bit)', 239, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (64-bit)', 240, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 7', 245, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 10 SP1 (32-bit)', 41, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 10 SP1 (64-bit)', 42, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 10 SP2 (32-bit)', 43, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 10 SP2 (64-bit)', 44, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 10 SP3 (32-bit)', 151, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 10 SP3 (64-bit)', 45, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 10 SP4 (32-bit)', 153, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 10 SP4 (64-bit)', 152, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 11 (32-bit)', 46, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 11 (64-bit)', 47, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 11 SP1 (32-bit)', 155, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 11 SP2 (32-bit)', 186, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 11 SP2 (64-bit)', 185, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 11 SP3 (32-bit)', 188, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 11 SP3 (32-bit)', 187, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'SUSE Linux Enterprise Server 12 (64-bit)', 244, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows 7 (32-bit)', 48, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows 7 (64-bit)', 49, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows 8 (32-bit)', 165, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows 8 (64-bit)', 166, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 51, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 87, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 88, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 89, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 90, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows Server 2008 (32-bit)', 52, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows Server 2008 (64-bit)', 53, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows Server 2008 R2 (64-bit)', 54, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows Server 2012 (64-bit)', 167, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows Server 2012 R2 (64-bit)', 168, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 58, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Lucid Lynx 10.04 (32-bit)', 121, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Lucid Lynx 10.04 (64-bit)', 126, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Maverick Meerkat 10.10 (32-bit) (experimental)', 156, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Maverick Meerkat 10.10 (64-bit) (experimental)', 157, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Precise Pangolin 12.04 (32-bit)', 163, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Precise Pangolin 12.04 (64-bit)', 164, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Trusty Tahr 14.04', 241, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Trusty Tahr 14.04', 254, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 169, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 170, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 98, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 99, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 60, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 103, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 200, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 201, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 59, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 100, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 202, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Other install media', 203, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Trusty Tahr 14.04', 255, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Ubuntu Xenial Xerus 16.04', 256, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows 10 (32-bit)', 257, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows 10 (64-bit)', 258, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Windows Server 2016 (64-bit)', 259, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 7', 260, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (32-bit)', 261, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (64-bit)', 262, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (32-bit)', 263, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 6 (64-bit)', 264, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (32-bit)', 265, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (64-bit)', 266, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (32-bit)', 267, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 6 (64-bit)', 268, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CoreOS', 271, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 7', 272, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'Red Hat Enterprise Linux 7', 273, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.1', 'CentOS 7', 274, now(), 0); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100228to4100229-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100228to4100229-cleanup.sql new file mode 100644 index 000000000000..3eed7064b625 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100228to4100229-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.10.0.228 to 4.10.0.229; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100228to4100229.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100228to4100229.sql new file mode 100644 index 000000000000..ebc46205b7f1 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100228to4100229.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.10.0.228 to 4.10.0.229; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100229to4100230-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100229to4100230-cleanup.sql new file mode 100644 index 000000000000..bb33c45055c1 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100229to4100230-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.10.0.229 to 4.10.0.230; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100229to4100230.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100229to4100230.sql new file mode 100644 index 000000000000..dd8399b172db --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100229to4100230.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.10.0.229 to 4.10.0.230; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100230to4100231-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100230to4100231-cleanup.sql new file mode 100644 index 000000000000..4ac2c75c3c46 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100230to4100231-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.10.0.230 to 4.10.0.231; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100230to4100231.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100230to4100231.sql new file mode 100644 index 000000000000..2e52107fb872 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100230to4100231.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.10.0.230 to 4.10.0.231; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100231to4100232-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100231to4100232-cleanup.sql new file mode 100644 index 000000000000..302903b70e88 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100231to4100232-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.10.0.231 to 4.10.0.232; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100231to4100232.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100231to4100232.sql new file mode 100644 index 000000000000..6aae124bd2a4 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100231to4100232.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.10.0.231 to 4.10.0.232; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100232to4100233-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100232to4100233-cleanup.sql new file mode 100644 index 000000000000..72980235e3d1 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100232to4100233-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.10.0.232 to 4.10.0.233; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100232to4100233.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100232to4100233.sql new file mode 100644 index 000000000000..e7ede878fe0f --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100232to4100233.sql @@ -0,0 +1,219 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.10.0.232 to 4.10.0.233; +--; + +-- XenServer 7.6 support update +INSERT INTO `cloud`.`hypervisor_capabilities`( + uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, storage_motion_supported) +values + (UUID(), 'XenServer', '7.6.0', 500, 13, 1); + +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 4.5 (32-bit)', 1, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 4.6 (32-bit)', 2, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 4.7 (32-bit)', 3, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 4.8 (32-bit)', 4, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 5, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 6, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 7, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 8, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 9, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 10, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 11, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 12, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 13, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 14, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 111, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 112, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 141, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 142, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 161, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 162, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 173, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 174, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 175, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 176, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 231, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 232, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (32-bit)', 139, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 5 (64-bit)', 140, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (32-bit)', 143, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (64-bit)', 144, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (32-bit)', 177, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (64-bit)', 178, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (32-bit)', 179, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (64-bit)', 180, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (32-bit)', 171, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (64-bit)', 172, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (32-bit)', 181, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (64-bit)', 182, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (32-bit)', 227, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (64-bit)', 228, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (32-bit)', 248, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (64-bit)', 249, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 7', 246, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Squeeze 6.0 (32-bit)', 132, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Squeeze 6.0 (64-bit)', 133, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Wheezy 7.0 (32-bit)', 183, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Wheezy 7.0 (64-bit)', 184, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 16, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 17, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 18, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 19, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 20, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 21, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 22, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 23, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 24, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 25, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 134, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 135, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 145, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 146, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 207, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 208, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 209, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 210, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 211, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 212, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (32-bit)', 233, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 5 (64-bit)', 234, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (32-bit)', 147, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (64-bit)', 148, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (32-bit)', 213, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (64-bit)', 214, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (32-bit)', 215, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (64-bit)', 216, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (32-bit)', 217, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (64-bit)', 218, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (32-bit)', 219, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (64-bit)', 220, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (32-bit)', 235, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (64-bit)', 236, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (32-bit)', 250, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Enterprise Linux 6 (64-bit)', 251, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Oracle Linux 7', 247, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 4.5 (32-bit)', 26, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 4.6 (32-bit)', 27, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 4.7 (32-bit)', 28, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 4.8 (32-bit)', 29, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 30, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 31, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 32, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 33, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 34, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 35, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 36, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 37, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 38, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 39, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 113, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 114, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 149, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 150, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 189, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 190, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 191, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 192, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 193, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 194, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (32-bit)', 237, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 5 (64-bit)', 238, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (32-bit)', 136, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (64-bit)', 137, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (32-bit)', 195, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (64-bit)', 196, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (32-bit)', 197, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (64-bit)', 198, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (32-bit)', 199, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (64-bit)', 204, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (32-bit)', 205, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (64-bit)', 206, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (32-bit)', 239, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (64-bit)', 240, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 7', 245, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 10 SP1 (32-bit)', 41, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 10 SP1 (64-bit)', 42, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 10 SP2 (32-bit)', 43, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 10 SP2 (64-bit)', 44, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 10 SP3 (32-bit)', 151, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 10 SP3 (64-bit)', 45, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 10 SP4 (32-bit)', 153, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 10 SP4 (64-bit)', 152, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 11 (32-bit)', 46, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 11 (64-bit)', 47, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 11 SP1 (32-bit)', 155, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 11 SP2 (32-bit)', 186, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 11 SP2 (64-bit)', 185, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 11 SP3 (32-bit)', 188, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 11 SP3 (32-bit)', 187, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'SUSE Linux Enterprise Server 12 (64-bit)', 244, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows 7 (32-bit)', 48, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows 7 (64-bit)', 49, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows 8 (32-bit)', 165, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows 8 (64-bit)', 166, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 51, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 87, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 88, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 89, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 90, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows Server 2008 (32-bit)', 52, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows Server 2008 (64-bit)', 53, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows Server 2008 R2 (64-bit)', 54, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows Server 2012 (64-bit)', 167, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows Server 2012 R2 (64-bit)', 168, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 58, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Lucid Lynx 10.04 (32-bit)', 121, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Lucid Lynx 10.04 (64-bit)', 126, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Maverick Meerkat 10.10 (32-bit) (experimental)', 156, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Maverick Meerkat 10.10 (64-bit) (experimental)', 157, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Precise Pangolin 12.04 (32-bit)', 163, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Precise Pangolin 12.04 (64-bit)', 164, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Trusty Tahr 14.04', 241, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Trusty Tahr 14.04', 254, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 169, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 170, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 98, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 99, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 60, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 103, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 200, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 201, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 59, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 100, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 202, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Other install media', 203, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Trusty Tahr 14.04', 255, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Xenial Xerus 16.04', 256, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows 10 (32-bit)', 257, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows 10 (64-bit)', 258, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Windows Server 2016 (64-bit)', 259, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 7', 260, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (32-bit)', 261, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (64-bit)', 262, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (32-bit)', 263, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 6 (64-bit)', 264, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (32-bit)', 265, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (64-bit)', 266, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (32-bit)', 267, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 6 (64-bit)', 268, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CoreOS', 271, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 7', 272, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Red Hat Enterprise Linux 7', 273, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'CentOS 7', 274, now(), 0); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100233to4100234-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100233to4100234-cleanup.sql new file mode 100644 index 000000000000..bb04f30d55e0 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100233to4100234-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.10.0.233 to 4.10.0.234; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100233to4100234.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100233to4100234.sql new file mode 100644 index 000000000000..e70d255b1987 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100233to4100234.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.10.0.233 to 4.10.0.234; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100234to4100235-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100234to4100235-cleanup.sql new file mode 100644 index 000000000000..153707f2d180 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100234to4100235-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.10.0.234 to 4.10.0.235; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100234to4100235.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100234to4100235.sql new file mode 100644 index 000000000000..889bcd21c8d9 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100234to4100235.sql @@ -0,0 +1,219 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.10.0.234 to 4.10.0.235; +--; + +-- XenServer 7.1.2 support update +INSERT INTO `cloud`.`hypervisor_capabilities`( + uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, storage_motion_supported) +values + (UUID(), 'XenServer', '7.1.2', 500, 13, 1); + +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 4.5 (32-bit)', 1, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 4.6 (32-bit)', 2, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 4.7 (32-bit)', 3, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 4.8 (32-bit)', 4, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 5, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 6, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 7, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 8, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 9, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 10, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 11, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 12, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 13, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 14, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 111, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 112, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 141, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 142, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 161, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 162, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 173, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 174, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 175, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 176, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 231, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 232, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (32-bit)', 139, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 5 (64-bit)', 140, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (32-bit)', 143, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)', 144, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (32-bit)', 177, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)', 178, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (32-bit)', 179, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)', 180, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (32-bit)', 171, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)', 172, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (32-bit)', 181, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)', 182, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (32-bit)', 227, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)', 228, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (32-bit)', 248, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)', 249, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 7', 246, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Debian Squeeze 6.0 (32-bit)', 132, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Debian Squeeze 6.0 (64-bit)', 133, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Debian Wheezy 7.0 (32-bit)', 183, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Debian Wheezy 7.0 (64-bit)', 184, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 16, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 17, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 18, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 19, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 20, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 21, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 22, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 23, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 24, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 25, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 134, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 135, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 145, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 146, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 207, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 208, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 209, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 210, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 211, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 212, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (32-bit)', 233, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 5 (64-bit)', 234, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (32-bit)', 147, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (64-bit)', 148, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (32-bit)', 213, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (64-bit)', 214, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (32-bit)', 215, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (64-bit)', 216, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (32-bit)', 217, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (64-bit)', 218, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (32-bit)', 219, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (64-bit)', 220, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (32-bit)', 235, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (64-bit)', 236, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (32-bit)', 250, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Enterprise Linux 6 (64-bit)', 251, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Oracle Linux 7', 247, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 4.5 (32-bit)', 26, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 4.6 (32-bit)', 27, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 4.7 (32-bit)', 28, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 4.8 (32-bit)', 29, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 30, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 31, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 32, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 33, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 34, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 35, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 36, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 37, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 38, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 39, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 113, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 114, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 149, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 150, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 189, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 190, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 191, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 192, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 193, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 194, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (32-bit)', 237, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 5 (64-bit)', 238, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (32-bit)', 136, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (64-bit)', 137, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (32-bit)', 195, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (64-bit)', 196, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (32-bit)', 197, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (64-bit)', 198, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (32-bit)', 199, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (64-bit)', 204, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (32-bit)', 205, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (64-bit)', 206, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (32-bit)', 239, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (64-bit)', 240, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 7', 245, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 10 SP1 (32-bit)', 41, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 10 SP1 (64-bit)', 42, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 10 SP2 (32-bit)', 43, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 10 SP2 (64-bit)', 44, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 10 SP3 (32-bit)', 151, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 10 SP3 (64-bit)', 45, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 10 SP4 (32-bit)', 153, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 10 SP4 (64-bit)', 152, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 11 (32-bit)', 46, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 11 (64-bit)', 47, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 11 SP1 (32-bit)', 155, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 11 SP2 (32-bit)', 186, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 11 SP2 (64-bit)', 185, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 11 SP3 (32-bit)', 188, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 11 SP3 (32-bit)', 187, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'SUSE Linux Enterprise Server 12 (64-bit)', 244, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows 7 (32-bit)', 48, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows 7 (64-bit)', 49, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows 8 (32-bit)', 165, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows 8 (64-bit)', 166, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 51, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 87, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 88, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 89, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 90, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows Server 2008 (32-bit)', 52, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows Server 2008 (64-bit)', 53, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows Server 2008 R2 (64-bit)', 54, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows Server 2012 (64-bit)', 167, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows Server 2012 R2 (64-bit)', 168, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 58, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Lucid Lynx 10.04 (32-bit)', 121, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Lucid Lynx 10.04 (64-bit)', 126, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Maverick Meerkat 10.10 (32-bit) (experimental)', 156, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Maverick Meerkat 10.10 (64-bit) (experimental)', 157, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Precise Pangolin 12.04 (32-bit)', 163, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Precise Pangolin 12.04 (64-bit)', 164, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Trusty Tahr 14.04', 241, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Trusty Tahr 14.04', 254, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 169, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 170, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 98, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 99, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 60, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 103, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 200, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 201, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 59, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 100, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 202, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Other install media', 203, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Trusty Tahr 14.04', 255, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Xenial Xerus 16.04', 256, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows 10 (32-bit)', 257, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows 10 (64-bit)', 258, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows Server 2016 (64-bit)', 259, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 7', 260, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (32-bit)', 261, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)', 262, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (32-bit)', 263, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)', 264, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (32-bit)', 265, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (64-bit)', 266, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (32-bit)', 267, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 6 (64-bit)', 268, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CoreOS', 271, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 7', 272, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Red Hat Enterprise Linux 7', 273, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 7', 274, now(), 0); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100235to4100236-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100235to4100236-cleanup.sql new file mode 100644 index 000000000000..a816d28f929e --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100235to4100236-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.10.0.235 to 4.10.0.236; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100235to4100236.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100235to4100236.sql new file mode 100644 index 000000000000..ea0e7c4b9266 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100235to4100236.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.10.0.235 to 4.10.0.236; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100236to4100237-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100236to4100237-cleanup.sql new file mode 100644 index 000000000000..7ffdcad75012 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100236to4100237-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.10.0.236 to 4.10.0.237; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100236to4100237.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100236to4100237.sql new file mode 100644 index 000000000000..2dc50cb3a92d --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100236to4100237.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.10.0.236 to 4.10.0.237; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100237to4100238-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100237to4100238-cleanup.sql new file mode 100644 index 000000000000..d6cc24aae28d --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100237to4100238-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.10.0.237 to 4.10.0.238; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100237to4100238.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100237to4100238.sql new file mode 100644 index 000000000000..d9fdd706736a --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100237to4100238.sql @@ -0,0 +1,22 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.10.0.237 to 4.10.0.238; +--; + +INSERT IGNORE INTO configuration (`category`, `instance`, `component`, `name`, `value`, `description`) VALUES ('Network', 'DEFAULT', 'management-server', 'vpc.usage.whitelist.cidr', null, 'List of CIDRs to track usage separately in VPCs'); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100238to4100239-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100238to4100239-cleanup.sql new file mode 100644 index 000000000000..eb704d49eb9b --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100238to4100239-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.10.0.238 to 4.10.0.239; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100238to4100239.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100238to4100239.sql new file mode 100644 index 000000000000..33285f34a9cc --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100238to4100239.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.10.0.238 to 4.10.0.239; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100239to4100240-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100239to4100240-cleanup.sql new file mode 100644 index 000000000000..64daa6bb0252 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100239to4100240-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.10.0.239 to 4.10.0.240; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4100239to4100240.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100239to4100240.sql new file mode 100644 index 000000000000..65f7954ccc89 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100239to4100240.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.10.0.239 to 4.10.0.240; +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41000to41100-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100240to41100-cleanup.sql similarity index 96% rename from engine/schema/src/main/resources/META-INF/db/schema-41000to41100-cleanup.sql rename to engine/schema/src/main/resources/META-INF/db/schema-4100240to41100-cleanup.sql index f8d9ce9b73bb..1657a0802e1d 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41000to41100-cleanup.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100240to41100-cleanup.sql @@ -16,7 +16,7 @@ -- under the License. --; --- Schema upgrade cleanup from 4.10.0.0 to 4.11.0.0 +-- Schema upgrade cleanup from 4.10.0.240 to 4.11.0.0; --; DELETE FROM `cloud`.`configuration` WHERE name='snapshot.backup.rightafter'; @@ -66,4 +66,4 @@ CREATE VIEW `cloud`.`user_view` AS left join `cloud`.`async_job` ON async_job.instance_id = user.id and async_job.instance_type = 'User' - and async_job.job_status = 0; \ No newline at end of file + and async_job.job_status = 0; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41000to41100.sql b/engine/schema/src/main/resources/META-INF/db/schema-4100240to41100.sql similarity index 99% rename from engine/schema/src/main/resources/META-INF/db/schema-41000to41100.sql rename to engine/schema/src/main/resources/META-INF/db/schema-4100240to41100.sql index 2db644f927cc..2608cbd6fdb3 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41000to41100.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-4100240to41100.sql @@ -16,7 +16,7 @@ -- under the License. --; --- Schema upgrade from 4.10.0.0 to 4.11.0.0 +-- Schema upgrade from 4.10.0.240 to 4.11.0.0; --; --; @@ -25,7 +25,7 @@ DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_ADD_COLUMN`; CREATE PROCEDURE `cloud`.`IDEMPOTENT_ADD_COLUMN` ( - IN in_table_name VARCHAR(200) + IN in_table_name VARCHAR(200) , IN in_column_name VARCHAR(200) , IN in_column_definition VARCHAR(1000) ) @@ -36,7 +36,7 @@ BEGIN DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_DROP_FOREIGN_KEY`; CREATE PROCEDURE `cloud`.`IDEMPOTENT_DROP_FOREIGN_KEY` ( - IN in_table_name VARCHAR(200) + IN in_table_name VARCHAR(200) , IN in_foreign_key_name VARCHAR(200) ) BEGIN @@ -46,7 +46,7 @@ BEGIN DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_DROP_INDEX`; CREATE PROCEDURE `cloud`.`IDEMPOTENT_DROP_INDEX` ( - IN in_index_name VARCHAR(200) + IN in_index_name VARCHAR(200) , IN in_table_name VARCHAR(200) ) BEGIN @@ -56,7 +56,7 @@ BEGIN DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_CREATE_UNIQUE_INDEX`; CREATE PROCEDURE `cloud`.`IDEMPOTENT_CREATE_UNIQUE_INDEX` ( - IN in_index_name VARCHAR(200) + IN in_index_name VARCHAR(200) , IN in_table_name VARCHAR(200) , IN in_index_definition VARCHAR(1000) ) diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41200to41201-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41200to41201-cleanup.sql new file mode 100644 index 000000000000..137776090809 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41200to41201-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.12.0.0 to 4.12.0.1 +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41200to41201.sql b/engine/schema/src/main/resources/META-INF/db/schema-41200to41201.sql new file mode 100644 index 000000000000..2b089cf48226 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41200to41201.sql @@ -0,0 +1,153 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.12.0.0 to 4.12.0.1 +--; + +-- add KVM / qemu io bursting options PR 3133 +ALTER VIEW `cloud`.`disk_offering_view` AS + SELECT + `disk_offering`.`id` AS `id`, + `disk_offering`.`uuid` AS `uuid`, + `disk_offering`.`name` AS `name`, + `disk_offering`.`display_text` AS `display_text`, + `disk_offering`.`provisioning_type` AS `provisioning_type`, + `disk_offering`.`disk_size` AS `disk_size`, + `disk_offering`.`min_iops` AS `min_iops`, + `disk_offering`.`max_iops` AS `max_iops`, + `disk_offering`.`created` AS `created`, + `disk_offering`.`tags` AS `tags`, + `disk_offering`.`customized` AS `customized`, + `disk_offering`.`customized_iops` AS `customized_iops`, + `disk_offering`.`removed` AS `removed`, + `disk_offering`.`use_local_storage` AS `use_local_storage`, + `disk_offering`.`system_use` AS `system_use`, + `disk_offering`.`hv_ss_reserve` AS `hv_ss_reserve`, + `disk_offering`.`bytes_read_rate` AS `bytes_read_rate`, + `disk_offering`.`bytes_read_rate_max` AS `bytes_read_rate_max`, + `disk_offering`.`bytes_read_rate_max_length` AS `bytes_read_rate_max_length`, + `disk_offering`.`bytes_write_rate` AS `bytes_write_rate`, + `disk_offering`.`bytes_write_rate_max` AS `bytes_write_rate_max`, + `disk_offering`.`bytes_write_rate_max_length` AS `bytes_write_rate_max_length`, + `disk_offering`.`iops_read_rate` AS `iops_read_rate`, + `disk_offering`.`iops_read_rate_max` AS `iops_read_rate_max`, + `disk_offering`.`iops_read_rate_max_length` AS `iops_read_rate_max_length`, + `disk_offering`.`iops_write_rate` AS `iops_write_rate`, + `disk_offering`.`iops_write_rate_max` AS `iops_write_rate_max`, + `disk_offering`.`iops_write_rate_max_length` AS `iops_write_rate_max_length`, + `disk_offering`.`min_iops_per_gb` AS `min_iops_per_gb`, + `disk_offering`.`max_iops_per_gb` AS `max_iops_per_gb`, + `disk_offering`.`highest_min_iops` AS `highest_min_iops`, + `disk_offering`.`highest_max_iops` AS `highest_max_iops`, + `disk_offering`.`cache_mode` AS `cache_mode`, + `disk_offering`.`sort_key` AS `sort_key`, + `disk_offering`.`type` AS `type`, + `disk_offering`.`display_offering` AS `display_offering`, + `domain`.`id` AS `domain_id`, + `domain`.`uuid` AS `domain_uuid`, + `domain`.`name` AS `domain_name`, + `domain`.`path` AS `domain_path` + FROM + (`disk_offering` + LEFT JOIN `domain` ON ((`disk_offering`.`domain_id` = `domain`.`id`))) + WHERE + (`disk_offering`.`state` = 'ACTIVE'); + +-- Add Ubuntu 18.04 LTS as support guest os +INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (277, UUID(), 10, 'Ubuntu 18.04 (32-bit)', utc_timestamp()); +INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (278, UUID(), 10, 'Ubuntu 18.04 (64-bit)', utc_timestamp()); +-- Ubuntu 18.04 KVM guest os mapping +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'KVM', 'default', 'Ubuntu 18.04', 277, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'KVM', 'default', 'Ubuntu 18.04', 278, utc_timestamp(), 0); +-- Ubuntu 18.04 XenServer guest os mapping +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '6.5.0', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '6.5.0', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.0.0', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.0.0', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.0', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.0', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.1', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.1', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.2', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.2', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.2.0', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.2.0', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.3.0', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.3.0', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.4.0', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.4.0', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.5.0', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.5.0', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.6.0', 'Ubuntu Bionic Beaver 18.04', 277, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.6.0', 'Ubuntu Bionic Beaver 18.04', 278, utc_timestamp(), 0); + +-- Add Ubuntu 18.10 as support guest os +INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (279, UUID(), 10, 'Ubuntu 18.10 (32-bit)', utc_timestamp()); +INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (280, UUID(), 10, 'Ubuntu 18.10 (64-bit)', utc_timestamp()); +-- Ubuntu 18.10 KVM guest os mapping +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'KVM', 'default', 'Ubuntu 18.10', 279, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'KVM', 'default', 'Ubuntu 18.10', 280, utc_timestamp(), 0); +-- Ubuntu 18.10 XenServer guest os mapping +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '6.5.0', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '6.5.0', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.0.0', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.0.0', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.0', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.0', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.1', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.1', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.2', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.2', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.2.0', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.2.0', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.3.0', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.3.0', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.4.0', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.4.0', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.5.0', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.5.0', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.6.0', 'Ubuntu Cosmic Cuttlefish 18.10', 279, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.6.0', 'Ubuntu Cosmic Cuttlefish 18.10', 280, utc_timestamp(), 0); + +-- Add Ubuntu 19.04 as support guest os +INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (281, UUID(), 10, 'Ubuntu 19.04 (32-bit)', utc_timestamp()); +INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (282, UUID(), 10, 'Ubuntu 19.04 (64-bit)', utc_timestamp()); +-- Ubuntu 19.04 KVM guest os mapping +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'KVM', 'default', 'Ubuntu 19.04', 281, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'KVM', 'default', 'Ubuntu 19.04', 282, utc_timestamp(), 0); +-- Ubuntu 19.04 XenServer guest os mapping +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '6.5.0', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '6.5.0', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.0.0', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.0.0', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.0', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.0', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.1', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.1', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.2', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.2', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.2.0', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.2.0', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.3.0', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.3.0', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.4.0', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.4.0', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.5.0', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.5.0', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.6.0', 'Ubuntu Disco Dingo 19.04', 281, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.6.0', 'Ubuntu Disco Dingo 19.04', 282, utc_timestamp(), 0); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41200to41300.sql b/engine/schema/src/main/resources/META-INF/db/schema-41200to41300.sql deleted file mode 100644 index 0b368eb310df..000000000000 --- a/engine/schema/src/main/resources/META-INF/db/schema-41200to41300.sql +++ /dev/null @@ -1,542 +0,0 @@ --- Licensed to the Apache Software Foundation (ASF) under one --- or more contributor license agreements. See the NOTICE file --- distributed with this work for additional information --- regarding copyright ownership. The ASF licenses this file --- to you under the Apache License, Version 2.0 (the --- "License"); you may not use this file except in compliance --- with the License. You may obtain a copy of the License at --- --- http://www.apache.org/licenses/LICENSE-2.0 --- --- Unless required by applicable law or agreed to in writing, --- software distributed under the License is distributed on an --- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY --- KIND, either express or implied. See the License for the --- specific language governing permissions and limitations --- under the License. - ---; --- Schema upgrade from 4.12.0.0 to 4.13.0.0 ---; - --- Add XenServer 7.1.2, 7.6 and 8.0 hypervisor capabilities -INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) values (UUID(), 'XenServer', '7.6.0', 1000, 253, 64, 1); -INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) values (UUID(), 'XenServer', '8.0.0', 1000, 253, 64, 1); -INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) values (UUID(), 'XenServer', '7.1.1', 1000, 253, 64, 1); -INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) values (UUID(), 'XenServer', '7.1.2', 1000, 253, 64, 1); - --- Add VMware 6.7 hypervisor capabilities -INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid,hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) VALUES (UUID(), 'VMware', '6.7', '1024', '0', '59', '64', '1', '1'); -INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid,hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) VALUES (UUID(), 'VMware', '6.7.1', '1024', '0', '59', '64', '1', '1'); -INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid,hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) VALUES (UUID(), 'VMware', '6.7.2', '1024', '0', '59', '64', '1', '1'); -INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid,hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) VALUES (UUID(), 'VMware', '6.7.3', '1024', '0', '59', '64', '1', '1'); - --- Update VMware 6.x hypervisor capabilities -UPDATE `cloud`.`hypervisor_capabilities` SET max_guests_limit='1024', max_data_volumes_limit='59', max_hosts_per_cluster='64' WHERE (hypervisor_type='VMware' AND hypervisor_version='6.0' ); -UPDATE `cloud`.`hypervisor_capabilities` SET max_guests_limit='1024', max_data_volumes_limit='59', max_hosts_per_cluster='64' WHERE (hypervisor_type='VMware' AND hypervisor_version='6.5' ); - --- Add new OS versions -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('277', UUID(), '1', 'Ubuntu 17.04', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('278', UUID(), '1', 'Ubuntu 17.10', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('279', UUID(), '1', 'Ubuntu 18.04 LTS', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('280', UUID(), '1', 'Ubuntu 18.10', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('281', UUID(), '1', 'Ubuntu 19.04', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('282', UUID(), '1', 'Red Hat Enterprise Linux 7.3', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('283', UUID(), '1', 'Red Hat Enterprise Linux 7.4', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('284', UUID(), '1', 'Red Hat Enterprise Linux 7.5', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('285', UUID(), '1', 'Red Hat Enterprise Linux 7.6', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('286', UUID(), '1', 'Red Hat Enterprise Linux 8.0', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('289', UUID(), '2', 'Debian GNU/Linux 9 (32-bit)', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('290', UUID(), '2', 'Debian GNU/Linux 9 (64-bit)', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('291', UUID(), '5', 'SUSE Linux Enterprise Server 15 (64-bit)', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('292', UUID(), '2', 'Debian GNU/Linux 10 (32-bit)', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('293', UUID(), '2', 'Debian GNU/Linux 10 (64-bit)', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('294', UUID(), '2', 'Linux 4.x Kernel (32-bit)', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('295', UUID(), '2', 'Linux 4.x Kernel (64-bit)', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('296', UUID(), '3', 'Oracle Linux 8', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('297', UUID(), '1', 'CentOS 8', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('298', UUID(), '9', 'FreeBSD 11 (32-bit)', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('299', UUID(), '9', 'FreeBSD 11 (64-bit)', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('300', UUID(), '9', 'FreeBSD 12 (32-bit)', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('301', UUID(), '9', 'FreeBSD 12 (64-bit)', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('302', UUID(), '1', 'CentOS 6.8', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('303', UUID(), '1', 'CentOS 6.9', now(), '0'); -INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('304', UUID(), '1', 'CentOS 6.10', now(), '0'); - --- Add New and missing VMware 6.5 Guest OSes -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 235, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 236, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 147, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 148, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 213, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 214, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 215, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 216, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 217, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 218, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 219, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 220, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 250, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 251, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux7_64Guest', 247, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntuGuest', 255, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 256, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 277, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 278, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 279, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 280, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel7_64Guest', 282, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel7_64Guest', 283, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel7_64Guest', 284, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel7_64Guest', 285, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'windows9Server64Guest', 276, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'debian9Guest', 289, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'debian9_64Guest', 290, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'debian10Guest', 282, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'debian10_64Guest', 293, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'sles15_64Guest', 291, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'centos6_64Guest', 302, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'centos6_64Guest', 303, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'centos6_64Guest', 304, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel8_64Guest', 286, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 281, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'other4xLinuxGuest', 294, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'other4xLinux64Guest', 295, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux8_64Guest', 296, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'centos8_64Guest', 297, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'freebsd11Guest', 298, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'freebsd11_64Guest', 299, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'freebsd12Guest', 300, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'freebsd12_64Guest', 301, now(), 0); - --- Copy VMware 6.5 Guest OSes to VMware 6.7 -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'VMware', '6.7', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='VMware' AND hypervisor_version='6.5'; -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'VMware', '6.7.1', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='VMware' AND hypervisor_version='6.7'; -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'VMware', '6.7.2', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='VMware' AND hypervisor_version='6.7.1'; -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'VMware', '6.7.3', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='VMware' AND hypervisor_version='6.7.2'; - --- Copy XenServer 7.1.0 to XenServer 7.1.1 -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '7.1.1', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.1.0'; - --- Copy XenServer 7.1.1 to XenServer 7.1.2 -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '7.1.2', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.1.1'; - --- Add New XenServer 7.1.2 Guest OSes -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Debian Stretch 9.0', 289, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Debian Stretch 9.0', 290, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Bionic Beaver 18.04', 279, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows Server 2019 (64-bit)', 276, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit', 303, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 7', 283, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 7', 284, now(), 0); --- Copy XenServer 7.5 hypervisor guest OS mappings to XenServer 7.6 -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '7.6.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.5.0'; - --- Add New XenServer 7.6 Guest OSes -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Jessie 8.0', 269, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Jessie 8.0', 270, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Stretch 9.0', 289, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Stretch 9.0', 290, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Xenial Xerus 16.04', 255, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Xenial Xerus 16.04', 256, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Bionic Beaver 18.04', 279, now(), 0); - --- Copy XenServer 7.6 hypervisor guest OS mappings to XenServer8.0 -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '8.0.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.6.0'; - --- Add New XenServer 8.0 Guest OSes -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '8.0.0', 'Windows Server 2019 (64-bit)', 276, now(), 0); - --- Add Missing KVM Guest OSes -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.6', 262, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.7', 263, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.7', 264, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.8', 302, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.9', 303, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.10', 304, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.2', 269, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.3', 282, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.4', 283, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.5', 284, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.6', 285, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 8', 286, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 17.04', 277, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 17.10', 278, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 18.04 LTS', 279, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 18.10', 280, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 19.04', 281, now(), 0); - --- DPDK client and server mode support -ALTER TABLE `cloud`.`service_offering_details` CHANGE COLUMN `value` `value` TEXT NOT NULL; - -ALTER TABLE `cloud`.`vpc_offerings` ADD COLUMN `sort_key` int(32) NOT NULL default 0 COMMENT 'sort key used for customising sort method'; - --- Add `sort_key` column to data_center -ALTER TABLE `cloud`.`data_center` ADD COLUMN `sort_key` INT(32) NOT NULL DEFAULT 0; - --- Move domain_id to disk offering details and drop the domain_id column -INSERT INTO `cloud`.`disk_offering_details` (offering_id, name, value, display) SELECT id, 'domainid', domain_id, 0 FROM `cloud`.`disk_offering` WHERE domain_id IS NOT NULL AND type='Disk'; -INSERT INTO `cloud`.`service_offering_details` (service_offering_id, name, value, display) SELECT id, 'domainid', domain_id, 0 FROM `cloud`.`disk_offering` WHERE domain_id IS NOT NULL AND type='Service'; - -ALTER TABLE `cloud`.`disk_offering` DROP COLUMN `domain_id`; - -ALTER TABLE `cloud`.`service_offering_details` DROP FOREIGN KEY `fk_service_offering_details__service_offering_id`, DROP KEY `uk_service_offering_id_name`; -ALTER TABLE `cloud`.`service_offering_details` ADD CONSTRAINT `fk_service_offering_details__service_offering_id` FOREIGN KEY (`service_offering_id`) REFERENCES `service_offering`(`id`) ON DELETE CASCADE; - --- Disk offering with multi-domains and multi-zones -DROP VIEW IF EXISTS `cloud`.`disk_offering_view`; -CREATE VIEW `cloud`.`disk_offering_view` AS - SELECT - `disk_offering`.`id` AS `id`, - `disk_offering`.`uuid` AS `uuid`, - `disk_offering`.`name` AS `name`, - `disk_offering`.`display_text` AS `display_text`, - `disk_offering`.`provisioning_type` AS `provisioning_type`, - `disk_offering`.`disk_size` AS `disk_size`, - `disk_offering`.`min_iops` AS `min_iops`, - `disk_offering`.`max_iops` AS `max_iops`, - `disk_offering`.`created` AS `created`, - `disk_offering`.`tags` AS `tags`, - `disk_offering`.`customized` AS `customized`, - `disk_offering`.`customized_iops` AS `customized_iops`, - `disk_offering`.`removed` AS `removed`, - `disk_offering`.`use_local_storage` AS `use_local_storage`, - `disk_offering`.`system_use` AS `system_use`, - `disk_offering`.`hv_ss_reserve` AS `hv_ss_reserve`, - `disk_offering`.`bytes_read_rate` AS `bytes_read_rate`, - `disk_offering`.`bytes_read_rate_max` AS `bytes_read_rate_max`, - `disk_offering`.`bytes_read_rate_max_length` AS `bytes_read_rate_max_length`, - `disk_offering`.`bytes_write_rate` AS `bytes_write_rate`, - `disk_offering`.`bytes_write_rate_max` AS `bytes_write_rate_max`, - `disk_offering`.`bytes_write_rate_max_length` AS `bytes_write_rate_max_length`, - `disk_offering`.`iops_read_rate` AS `iops_read_rate`, - `disk_offering`.`iops_read_rate_max` AS `iops_read_rate_max`, - `disk_offering`.`iops_read_rate_max_length` AS `iops_read_rate_max_length`, - `disk_offering`.`iops_write_rate` AS `iops_write_rate`, - `disk_offering`.`iops_write_rate_max` AS `iops_write_rate_max`, - `disk_offering`.`iops_write_rate_max_length` AS `iops_write_rate_max_length`, - `disk_offering`.`cache_mode` AS `cache_mode`, - `disk_offering`.`sort_key` AS `sort_key`, - `disk_offering`.`type` AS `type`, - `disk_offering`.`display_offering` AS `display_offering`, - `disk_offering`.`state` AS `state`, - GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id, - GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid, - GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name, - GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path, - GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id, - GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid, - GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name - FROM - `cloud`.`disk_offering` - LEFT JOIN - `cloud`.`disk_offering_details` AS `domain_details` ON `domain_details`.`offering_id` = `disk_offering`.`id` AND `domain_details`.`name`='domainid' - LEFT JOIN - `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`) - LEFT JOIN - `cloud`.`disk_offering_details` AS `zone_details` ON `zone_details`.`offering_id` = `disk_offering`.`id` AND `zone_details`.`name`='zoneid' - LEFT JOIN - `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`) - WHERE - `disk_offering`.`state`='Active' - GROUP BY - `disk_offering`.`id`; - --- Service offering with multi-domains and multi-zones -DROP VIEW IF EXISTS `cloud`.`service_offering_view`; -CREATE VIEW `cloud`.`service_offering_view` AS - SELECT - `service_offering`.`id` AS `id`, - `disk_offering`.`uuid` AS `uuid`, - `disk_offering`.`name` AS `name`, - `disk_offering`.`display_text` AS `display_text`, - `disk_offering`.`provisioning_type` AS `provisioning_type`, - `disk_offering`.`created` AS `created`, - `disk_offering`.`tags` AS `tags`, - `disk_offering`.`removed` AS `removed`, - `disk_offering`.`use_local_storage` AS `use_local_storage`, - `disk_offering`.`system_use` AS `system_use`, - `disk_offering`.`customized_iops` AS `customized_iops`, - `disk_offering`.`min_iops` AS `min_iops`, - `disk_offering`.`max_iops` AS `max_iops`, - `disk_offering`.`hv_ss_reserve` AS `hv_ss_reserve`, - `disk_offering`.`bytes_read_rate` AS `bytes_read_rate`, - `disk_offering`.`bytes_read_rate_max` AS `bytes_read_rate_max`, - `disk_offering`.`bytes_read_rate_max_length` AS `bytes_read_rate_max_length`, - `disk_offering`.`bytes_write_rate` AS `bytes_write_rate`, - `disk_offering`.`bytes_write_rate_max` AS `bytes_write_rate_max`, - `disk_offering`.`bytes_write_rate_max_length` AS `bytes_write_rate_max_length`, - `disk_offering`.`iops_read_rate` AS `iops_read_rate`, - `disk_offering`.`iops_read_rate_max` AS `iops_read_rate_max`, - `disk_offering`.`iops_read_rate_max_length` AS `iops_read_rate_max_length`, - `disk_offering`.`iops_write_rate` AS `iops_write_rate`, - `disk_offering`.`iops_write_rate_max` AS `iops_write_rate_max`, - `disk_offering`.`iops_write_rate_max_length` AS `iops_write_rate_max_length`, - `disk_offering`.`cache_mode` AS `cache_mode`, - `service_offering`.`cpu` AS `cpu`, - `service_offering`.`speed` AS `speed`, - `service_offering`.`ram_size` AS `ram_size`, - `service_offering`.`nw_rate` AS `nw_rate`, - `service_offering`.`mc_rate` AS `mc_rate`, - `service_offering`.`ha_enabled` AS `ha_enabled`, - `service_offering`.`limit_cpu_use` AS `limit_cpu_use`, - `service_offering`.`host_tag` AS `host_tag`, - `service_offering`.`default_use` AS `default_use`, - `service_offering`.`vm_type` AS `vm_type`, - `service_offering`.`sort_key` AS `sort_key`, - `service_offering`.`is_volatile` AS `is_volatile`, - `service_offering`.`deployment_planner` AS `deployment_planner`, - GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id, - GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid, - GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name, - GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path, - GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id, - GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid, - GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name - FROM - `cloud`.`service_offering` - INNER JOIN - `cloud`.`disk_offering_view` AS `disk_offering` ON service_offering.id = disk_offering.id - LEFT JOIN - `cloud`.`service_offering_details` AS `domain_details` ON `domain_details`.`service_offering_id` = `disk_offering`.`id` AND `domain_details`.`name`='domainid' - LEFT JOIN - `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`) - LEFT JOIN - `cloud`.`service_offering_details` AS `zone_details` ON `zone_details`.`service_offering_id` = `disk_offering`.`id` AND `zone_details`.`name`='zoneid' - LEFT JOIN - `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`) - WHERE - `disk_offering`.`state`='Active' - GROUP BY - `service_offering`.`id`; - --- Add display column for network offering details table -ALTER TABLE `cloud`.`network_offering_details` ADD COLUMN `display` tinyint(1) NOT NULL DEFAULT '1' COMMENT 'True if the detail can be displayed to the end user'; - --- Network offering with multi-domains and multi-zones -DROP VIEW IF EXISTS `cloud`.`network_offering_view`; -CREATE VIEW `cloud`.`network_offering_view` AS - SELECT - `network_offerings`.`id` AS `id`, - `network_offerings`.`uuid` AS `uuid`, - `network_offerings`.`name` AS `name`, - `network_offerings`.`unique_name` AS `unique_name`, - `network_offerings`.`display_text` AS `display_text`, - `network_offerings`.`nw_rate` AS `nw_rate`, - `network_offerings`.`mc_rate` AS `mc_rate`, - `network_offerings`.`traffic_type` AS `traffic_type`, - `network_offerings`.`tags` AS `tags`, - `network_offerings`.`system_only` AS `system_only`, - `network_offerings`.`specify_vlan` AS `specify_vlan`, - `network_offerings`.`service_offering_id` AS `service_offering_id`, - `network_offerings`.`conserve_mode` AS `conserve_mode`, - `network_offerings`.`created` AS `created`, - `network_offerings`.`removed` AS `removed`, - `network_offerings`.`default` AS `default`, - `network_offerings`.`availability` AS `availability`, - `network_offerings`.`dedicated_lb_service` AS `dedicated_lb_service`, - `network_offerings`.`shared_source_nat_service` AS `shared_source_nat_service`, - `network_offerings`.`sort_key` AS `sort_key`, - `network_offerings`.`redundant_router_service` AS `redundant_router_service`, - `network_offerings`.`state` AS `state`, - `network_offerings`.`guest_type` AS `guest_type`, - `network_offerings`.`elastic_ip_service` AS `elastic_ip_service`, - `network_offerings`.`eip_associate_public_ip` AS `eip_associate_public_ip`, - `network_offerings`.`elastic_lb_service` AS `elastic_lb_service`, - `network_offerings`.`specify_ip_ranges` AS `specify_ip_ranges`, - `network_offerings`.`inline` AS `inline`, - `network_offerings`.`is_persistent` AS `is_persistent`, - `network_offerings`.`internal_lb` AS `internal_lb`, - `network_offerings`.`public_lb` AS `public_lb`, - `network_offerings`.`egress_default_policy` AS `egress_default_policy`, - `network_offerings`.`concurrent_connections` AS `concurrent_connections`, - `network_offerings`.`keep_alive_enabled` AS `keep_alive_enabled`, - `network_offerings`.`supports_streched_l2` AS `supports_streched_l2`, - `network_offerings`.`supports_public_access` AS `supports_public_access`, - `network_offerings`.`for_vpc` AS `for_vpc`, - `network_offerings`.`service_package_id` AS `service_package_id`, - GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id, - GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid, - GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name, - GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path, - GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id, - GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid, - GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name - FROM - `cloud`.`network_offerings` - LEFT JOIN - `cloud`.`network_offering_details` AS `domain_details` ON `domain_details`.`network_offering_id` = `network_offerings`.`id` AND `domain_details`.`name`='domainid' - LEFT JOIN - `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`) - LEFT JOIN - `cloud`.`network_offering_details` AS `zone_details` ON `zone_details`.`network_offering_id` = `network_offerings`.`id` AND `zone_details`.`name`='zoneid' - LEFT JOIN - `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`) - GROUP BY - `network_offerings`.`id`; - --- Create VPC offering details table -CREATE TABLE `vpc_offering_details` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, - `offering_id` bigint(20) unsigned NOT NULL COMMENT 'vpc offering id', - `name` varchar(255) NOT NULL, - `value` varchar(1024) NOT NULL, - `display` tinyint(1) NOT NULL DEFAULT '1' COMMENT 'True if the detail can be displayed to the end user', - PRIMARY KEY (`id`), - KEY `fk_vpc_offering_details__vpc_offering_id` (`offering_id`), - CONSTRAINT `fk_vpc_offering_details__vpc_offering_id` FOREIGN KEY (`offering_id`) REFERENCES `vpc_offerings` (`id`) ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - --- VPC offering with multi-domains and multi-zones -DROP VIEW IF EXISTS `cloud`.`vpc_offering_view`; -CREATE VIEW `cloud`.`vpc_offering_view` AS - SELECT - `vpc_offerings`.`id` AS `id`, - `vpc_offerings`.`uuid` AS `uuid`, - `vpc_offerings`.`name` AS `name`, - `vpc_offerings`.`unique_name` AS `unique_name`, - `vpc_offerings`.`display_text` AS `display_text`, - `vpc_offerings`.`state` AS `state`, - `vpc_offerings`.`default` AS `default`, - `vpc_offerings`.`created` AS `created`, - `vpc_offerings`.`removed` AS `removed`, - `vpc_offerings`.`service_offering_id` AS `service_offering_id`, - `vpc_offerings`.`supports_distributed_router` AS `supports_distributed_router`, - `vpc_offerings`.`supports_region_level_vpc` AS `supports_region_level_vpc`, - `vpc_offerings`.`redundant_router_service` AS `redundant_router_service`, - `vpc_offerings`.`sort_key` AS `sort_key`, - GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id, - GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid, - GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name, - GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path, - GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id, - GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid, - GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name - FROM - `cloud`.`vpc_offerings` - LEFT JOIN - `cloud`.`vpc_offering_details` AS `domain_details` ON `domain_details`.`offering_id` = `vpc_offerings`.`id` AND `domain_details`.`name`='domainid' - LEFT JOIN - `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`) - LEFT JOIN - `cloud`.`vpc_offering_details` AS `zone_details` ON `zone_details`.`offering_id` = `vpc_offerings`.`id` AND `zone_details`.`name`='zoneid' - LEFT JOIN - `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`) - GROUP BY - `vpc_offerings`.`id`; - --- Recreate data_center_view -DROP VIEW IF EXISTS `cloud`.`data_center_view`; -CREATE VIEW `cloud`.`data_center_view` AS - select - data_center.id, - data_center.uuid, - data_center.name, - data_center.is_security_group_enabled, - data_center.is_local_storage_enabled, - data_center.description, - data_center.dns1, - data_center.dns2, - data_center.ip6_dns1, - data_center.ip6_dns2, - data_center.internal_dns1, - data_center.internal_dns2, - data_center.guest_network_cidr, - data_center.domain, - data_center.networktype, - data_center.allocation_state, - data_center.zone_token, - data_center.dhcp_provider, - data_center.removed, - data_center.sort_key, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - dedicated_resources.affinity_group_id, - dedicated_resources.account_id, - affinity_group.uuid affinity_group_uuid - from - `cloud`.`data_center` - left join - `cloud`.`domain` ON data_center.domain_id = domain.id - left join - `cloud`.`dedicated_resources` ON data_center.id = dedicated_resources.data_center_id - left join - `cloud`.`affinity_group` ON dedicated_resources.affinity_group_id = affinity_group.id; - --- Remove key/value tags from project_view -DROP VIEW IF EXISTS `cloud`.`project_view`; -CREATE VIEW `cloud`.`project_view` AS - select - projects.id, - projects.uuid, - projects.name, - projects.display_text, - projects.state, - projects.removed, - projects.created, - projects.project_account_id, - account.account_name owner, - pacct.account_id, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path - from - `cloud`.`projects` - inner join - `cloud`.`domain` ON projects.domain_id = domain.id - inner join - `cloud`.`project_account` ON projects.id = project_account.project_id - and project_account.account_role = 'Admin' - inner join - `cloud`.`account` ON account.id = project_account.account_id - left join - `cloud`.`project_account` pacct ON projects.id = pacct.project_id; - --- KVM: Add background task to upload certificates for direct download -CREATE TABLE `cloud`.`direct_download_certificate` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, - `uuid` varchar(40) NOT NULL, - `alias` varchar(255) NOT NULL, - `certificate` text NOT NULL, - `hypervisor_type` varchar(45) NOT NULL, - `zone_id` bigint(20) unsigned NOT NULL, - PRIMARY KEY (`id`), - KEY `i_direct_download_certificate_alias` (`alias`), - KEY `fk_direct_download_certificate__zone_id` (`zone_id`), - CONSTRAINT `fk_direct_download_certificate__zone_id` FOREIGN KEY (`zone_id`) REFERENCES `data_center` (`id`) ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`direct_download_certificate_host_map` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, - `certificate_id` bigint(20) unsigned NOT NULL, - `host_id` bigint(20) unsigned NOT NULL, - `revoked` int(1) NOT NULL DEFAULT 0, - PRIMARY KEY (`id`), - KEY `fk_direct_download_certificate_host_map__host_id` (`host_id`), - KEY `fk_direct_download_certificate_host_map__certificate_id` (`certificate_id`), - CONSTRAINT `fk_direct_download_certificate_host_map__host_id` FOREIGN KEY (`host_id`) REFERENCES `host` (`id`) ON DELETE CASCADE, - CONSTRAINT `fk_direct_download_certificate_host_map__certificate_id` FOREIGN KEY (`certificate_id`) REFERENCES `direct_download_certificate` (`id`) ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - --- [Vmware] Allow configuring appliances on the VM instance wizard when OVF properties are available -CREATE TABLE `cloud`.`template_ovf_properties` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, - `template_id` bigint(20) unsigned NOT NULL, - `key` VARCHAR(100) NOT NULL, - `type` VARCHAR(45) DEFAULT NULL, - `value` VARCHAR(100) DEFAULT NULL, - `password` TINYINT(1) NOT NULL DEFAULT '0', - `qualifiers` TEXT DEFAULT NULL, - `user_configurable` TINYINT(1) NOT NULL DEFAULT '0', - `label` TEXT DEFAULT NULL, - `description` TEXT DEFAULT NULL, - PRIMARY KEY (`id`), - CONSTRAINT `fk_template_ovf_properties__template_id` FOREIGN KEY (`template_id`) REFERENCES `vm_template`(`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - --- Add VM snapshot ID on usage helper tables -ALTER TABLE `cloud_usage`.`usage_vmsnapshot` ADD COLUMN `vm_snapshot_id` BIGINT(20) NULL DEFAULT NULL AFTER `processed`; -ALTER TABLE `cloud_usage`.`usage_snapshot_on_primary` ADD COLUMN `vm_snapshot_id` BIGINT(20) NULL DEFAULT NULL AFTER `deleted`; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41201to41202-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41201to41202-cleanup.sql new file mode 100644 index 000000000000..744bc7bb2c09 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41201to41202-cleanup.sql @@ -0,0 +1,82 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.12.0.1 to 4.12.0.2 +--; + +-- Ubuntu 18.04 fixes +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '6.5.0' AND `guest_os_id` = 277; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '6.5.0' AND `guest_os_id` = 278; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.0.0' AND `guest_os_id` = 277; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.0.0' AND `guest_os_id` = 278; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.0' AND `guest_os_id` = 277; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.0' AND `guest_os_id` = 278; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.1' AND `guest_os_id` = 277; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.1' AND `guest_os_id` = 278; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.2.0' AND `guest_os_id` = 277; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.2.0' AND `guest_os_id` = 278; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.3.0' AND `guest_os_id` = 277; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.3.0' AND `guest_os_id` = 278; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.4.0' AND `guest_os_id` = 277; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.4.0' AND `guest_os_id` = 278; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.5.0' AND `guest_os_id` = 277; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.5.0' AND `guest_os_id` = 278; + +-- Ubuntu 18.10 fixes +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '6.5.0' AND `guest_os_id` = 279; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '6.5.0' AND `guest_os_id` = 280; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.0.0' AND `guest_os_id` = 279; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.0.0' AND `guest_os_id` = 280; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.0' AND `guest_os_id` = 279; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.0' AND `guest_os_id` = 280; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.1' AND `guest_os_id` = 279; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.1' AND `guest_os_id` = 280; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.2' AND `guest_os_id` = 279; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.2' AND `guest_os_id` = 280; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.2.0' AND `guest_os_id` = 279; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.2.0' AND `guest_os_id` = 280; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.3.0' AND `guest_os_id` = 279; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.3.0' AND `guest_os_id` = 280; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.4.0' AND `guest_os_id` = 279; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.4.0' AND `guest_os_id` = 280; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.5.0' AND `guest_os_id` = 279; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.5.0' AND `guest_os_id` = 280; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.6.0' AND `guest_os_id` = 279; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.6.0' AND `guest_os_id` = 280; + +-- Ubuntu 19.04 fixes +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '6.5.0' AND `guest_os_id` = 281; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '6.5.0' AND `guest_os_id` = 282; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.0.0' AND `guest_os_id` = 281; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.0.0' AND `guest_os_id` = 282; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.0' AND `guest_os_id` = 281; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.0' AND `guest_os_id` = 282; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.1' AND `guest_os_id` = 281; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.1' AND `guest_os_id` = 282; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.2' AND `guest_os_id` = 281; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.1.2' AND `guest_os_id` = 282; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.2.0' AND `guest_os_id` = 281; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.2.0' AND `guest_os_id` = 282; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.3.0' AND `guest_os_id` = 281; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.3.0' AND `guest_os_id` = 282; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.4.0' AND `guest_os_id` = 281; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.4.0' AND `guest_os_id` = 282; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.5.0' AND `guest_os_id` = 281; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.5.0' AND `guest_os_id` = 282; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.6.0' AND `guest_os_id` = 281; +UPDATE `cloud`.`guest_os_hypervisor` SET `guest_os_name` = 'Other install media' WHERE `hypervisor_type` = 'Xenserver' AND `hypervisor_version` = '7.6.0' AND `guest_os_id` = 282; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41201to41202.sql b/engine/schema/src/main/resources/META-INF/db/schema-41201to41202.sql new file mode 100644 index 000000000000..335a326980e5 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41201to41202.sql @@ -0,0 +1,32 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.12.0.1 to 4.12.0.2 +--; + +-- Windows Server 2019 XenServer guest os mapping +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '6.5.0', 'Other install media', 276, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.0.0', 'Other install media', 276, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.0', 'Other install media', 276, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.1', 'Other install media', 276, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.2', 'Other install media', 276, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.2.0', 'Other install media', 276, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.3.0', 'Other install media', 276, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.4.0', 'Other install media', 276, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.5.0', 'Other install media', 276, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.6.0', 'Other install media', 276, utc_timestamp(), 0); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41200to41300-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-412025to41300-cleanup.sql similarity index 100% rename from engine/schema/src/main/resources/META-INF/db/schema-41200to41300-cleanup.sql rename to engine/schema/src/main/resources/META-INF/db/schema-412025to41300-cleanup.sql diff --git a/engine/schema/src/main/resources/META-INF/db/schema-412025to41300.sql b/engine/schema/src/main/resources/META-INF/db/schema-412025to41300.sql new file mode 100644 index 000000000000..3d1920ca492d --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-412025to41300.sql @@ -0,0 +1,668 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.12.0.0 to 4.13.0.0 +--; + +-- Add XenServer 7.1.2, 7.6 and 8.0 hypervisor capabilities +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) values (UUID(), 'XenServer', '7.6.0', 1000, 253, 64, 1); +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) values (UUID(), 'XenServer', '8.0.0', 1000, 253, 64, 1); +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) values (UUID(), 'XenServer', '7.1.1', 1000, 253, 64, 1); +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) values (UUID(), 'XenServer', '7.1.2', 1000, 253, 64, 1); + +-- Add VMware 6.7 hypervisor capabilities +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid,hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) VALUES (UUID(), 'VMware', '6.7', '1024', '0', '59', '64', '1', '1'); +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid,hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) VALUES (UUID(), 'VMware', '6.7.1', '1024', '0', '59', '64', '1', '1'); +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid,hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) VALUES (UUID(), 'VMware', '6.7.2', '1024', '0', '59', '64', '1', '1'); +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid,hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) VALUES (UUID(), 'VMware', '6.7.3', '1024', '0', '59', '64', '1', '1'); + +-- Update VMware 6.x hypervisor capabilities +UPDATE `cloud`.`hypervisor_capabilities` SET max_guests_limit='1024', max_data_volumes_limit='59', max_hosts_per_cluster='64' WHERE (hypervisor_type='VMware' AND hypervisor_version='6.0' ); +UPDATE `cloud`.`hypervisor_capabilities` SET max_guests_limit='1024', max_data_volumes_limit='59', max_hosts_per_cluster='64' WHERE (hypervisor_type='VMware' AND hypervisor_version='6.5' ); + + +-- Copy from 41520to41600 - PR#4699 Drop the procedure `ADD_GUEST_OS_AND_HYPERVISOR_MAPPING` if it already exist. +DROP PROCEDURE IF EXISTS `cloud`.`ADD_GUEST_OS_AND_HYPERVISOR_MAPPING`; + +-- Copy from 41520to41600 - PR#4699 Create the procedure `ADD_GUEST_OS_AND_HYPERVISOR_MAPPING` to add guest_os and guest_os_hypervisor mapping. +CREATE PROCEDURE `cloud`.`ADD_GUEST_OS_AND_HYPERVISOR_MAPPING` ( + IN guest_os_category_id bigint(20) unsigned, + IN guest_os_display_name VARCHAR(255), + IN guest_os_hypervisor_hypervisor_type VARCHAR(32), + IN guest_os_hypervisor_hypervisor_version VARCHAR(32), + IN guest_os_hypervisor_guest_os_name VARCHAR(255) +) +BEGIN + INSERT INTO cloud.guest_os (uuid, category_id, display_name, created) + SELECT UUID(), guest_os_category_id, guest_os_display_name, now() + FROM DUAL + WHERE not exists( SELECT 1 + FROM cloud.guest_os + WHERE cloud.guest_os.category_id = guest_os_category_id + AND cloud.guest_os.display_name = guest_os_display_name) + +; INSERT INTO cloud.guest_os_hypervisor (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created) + SELECT UUID(), guest_os_hypervisor_hypervisor_type, guest_os_hypervisor_hypervisor_version, guest_os_hypervisor_guest_os_name, guest_os.id, now() + FROM cloud.guest_os + WHERE guest_os.category_id = guest_os_category_id + AND guest_os.display_name = guest_os_display_name + AND NOT EXISTS (SELECT 1 + FROM cloud.guest_os_hypervisor as hypervisor + WHERE hypervisor_type = guest_os_hypervisor_hypervisor_type + AND hypervisor_version = guest_os_hypervisor_hypervisor_version + AND hypervisor.guest_os_id = guest_os.id + AND hypervisor.guest_os_name = guest_os_hypervisor_guest_os_name) +;END; + +DROP PROCEDURE IF EXISTS `cloud`.`ADD_GUEST_OS_ONLY`; +CREATE PROCEDURE `cloud`.`ADD_GUEST_OS_ONLY` ( + IN guest_os_category_id bigint(20) unsigned, + IN guest_os_display_name VARCHAR(255) +) +BEGIN + INSERT INTO cloud.guest_os (uuid, category_id, display_name, created) + SELECT UUID(), guest_os_category_id, guest_os_display_name, now() + FROM DUAL + WHERE not exists( SELECT 1 + FROM cloud.guest_os + WHERE cloud.guest_os.category_id = guest_os_category_id + AND cloud.guest_os.display_name = guest_os_display_name) +;END; + +---- Add new OS versions +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('277', UUID(), '1', 'Ubuntu 17.04', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('278', UUID(), '1', 'Ubuntu 17.10', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('279', UUID(), '1', 'Ubuntu 18.04 LTS', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('280', UUID(), '1', 'Ubuntu 18.10', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('281', UUID(), '1', 'Ubuntu 19.04', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('282', UUID(), '1', 'Red Hat Enterprise Linux 7.3', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('283', UUID(), '1', 'Red Hat Enterprise Linux 7.4', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('284', UUID(), '1', 'Red Hat Enterprise Linux 7.5', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('285', UUID(), '1', 'Red Hat Enterprise Linux 7.6', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('286', UUID(), '1', 'Red Hat Enterprise Linux 8.0', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('289', UUID(), '2', 'Debian GNU/Linux 9 (32-bit)', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('290', UUID(), '2', 'Debian GNU/Linux 9 (64-bit)', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('291', UUID(), '5', 'SUSE Linux Enterprise Server 15 (64-bit)', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('292', UUID(), '2', 'Debian GNU/Linux 10 (32-bit)', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('293', UUID(), '2', 'Debian GNU/Linux 10 (64-bit)', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('294', UUID(), '2', 'Linux 4.x Kernel (32-bit)', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('295', UUID(), '2', 'Linux 4.x Kernel (64-bit)', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('296', UUID(), '3', 'Oracle Linux 8', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('297', UUID(), '1', 'CentOS 8', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('298', UUID(), '9', 'FreeBSD 11 (32-bit)', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('299', UUID(), '9', 'FreeBSD 11 (64-bit)', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('300', UUID(), '9', 'FreeBSD 12 (32-bit)', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('301', UUID(), '9', 'FreeBSD 12 (64-bit)', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('302', UUID(), '1', 'CentOS 6.8', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('303', UUID(), '1', 'CentOS 6.9', now(), '0'); +--INSERT INTO cloud.guest_os (id, uuid, category_id, display_name, created, is_user_defined) VALUES ('304', UUID(), '1', 'CentOS 6.10', now(), '0'); +-- +---- Add New and missing VMware 6.5 Guest OSes +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 235, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 236, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 147, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 148, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 213, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 214, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 215, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 216, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 217, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 218, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 219, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 220, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6Guest', 250, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux6_64Guest', 251, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux7_64Guest', 247, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntuGuest', 255, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 256, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 277, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 278, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 279, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 280, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel7_64Guest', 282, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel7_64Guest', 283, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel7_64Guest', 284, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel7_64Guest', 285, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'windows9Server64Guest', 276, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'debian9Guest', 289, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'debian9_64Guest', 290, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'debian10Guest', 282, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'debian10_64Guest', 293, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'sles15_64Guest', 291, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'centos6_64Guest', 302, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'centos6_64Guest', 303, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'centos6_64Guest', 304, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'rhel8_64Guest', 286, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'ubuntu64Guest', 281, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'other4xLinuxGuest', 294, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'other4xLinux64Guest', 295, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'oracleLinux8_64Guest', 296, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'centos8_64Guest', 297, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'freebsd11Guest', 298, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'freebsd11_64Guest', 299, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'freebsd12Guest', 300, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.5', 'freebsd12_64Guest', 301, now(), 0); + +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6 (32-bit)', 'VMware', '6.5', 'oracleLinux6Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6 (64-bit)', 'VMware', '6.5', 'oracleLinux6_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.0 (32-bit)', 'VMware', '6.5', 'oracleLinux6Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.0 (64-bit)', 'VMware', '6.5', 'oracleLinux6_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.1 (32-bit)', 'VMware', '6.5', 'oracleLinux6Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.1 (64-bit)', 'VMware', '6.5', 'oracleLinux6_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.2 (32-bit)', 'VMware', '6.5', 'oracleLinux6Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.2 (64-bit)', 'VMware', '6.5', 'oracleLinux6_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.3 (32-bit)', 'VMware', '6.5', 'oracleLinux6Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.3 (64-bit)', 'VMware', '6.5', 'oracleLinux6_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.4 (32-bit)', 'VMware', '6.5', 'oracleLinux6Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.4 (64-bit)', 'VMware', '6.5', 'oracleLinux6_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.5 (32-bit)', 'VMware', '6.5', 'oracleLinux6Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Enterprise Linux 6.5 (64-bit)', 'VMware', '6.5', 'oracleLinux6_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Linux 7', 'VMware', '6.5', 'oracleLinux7_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 16.04 (32-bit)', 'VMware', '6.5', 'ubuntuGuest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 16.04 (64-bit)', 'VMware', '6.5', 'ubuntu64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows Server 2019 (64-bit)', 'VMware', '6.5', 'windows9Server64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 17.04', 'VMware', '6.5', 'ubuntu64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 17.10', 'VMware', '6.5', 'ubuntu64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 18.04 LTS', 'VMware', '6.5', 'ubuntu64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 18.10', 'VMware', '6.5', 'ubuntu64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 19.04', 'VMware', '6.5', 'ubuntu64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.3', 'VMware', '6.5', 'rhel7_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.4', 'VMware', '6.5', 'rhel7_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.5', 'VMware', '6.5', 'rhel7_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.6', 'VMware', '6.5', 'rhel7_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 8.0', 'VMware', '6.5', 'rhel8_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 9 (32-bit)', 'VMware', '6.5', 'debian9Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 9 (64-bit)', 'VMware', '6.5', 'debian9_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (5, 'SUSE Linux Enterprise Server 15 (64-bit)', 'VMware', '6.5', 'sles15_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 10 (32-bit)', 'VMware', '6.5', 'debian10Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 10 (64-bit)', 'VMware', '6.5', 'debian10_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Linux 4.x Kernel (32-bit)', 'VMware', '6.5', 'other4xLinuxGuest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Linux 4.x Kernel (64-bit)', 'VMware', '6.5', 'other4xLinux64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Linux 8', 'VMware', '6.5', 'oracleLinux8_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 8', 'VMware', '6.5', 'centos8_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (9, 'FreeBSD 11 (32-bit)', 'VMware', '6.5', 'freebsd11Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (9, 'FreeBSD 11 (64-bit)', 'VMware', '6.5', 'freebsd11_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (9, 'FreeBSD 12 (32-bit)', 'VMware', '6.5', 'freebsd12Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (9, 'FreeBSD 12 (64-bit)', 'VMware', '6.5', 'freebsd12_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.8', 'VMware', '6.5', 'centos6_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.9', 'VMware', '6.5', 'centos6_64Guest'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.10', 'VMware', '6.5', 'centos6_64Guest'); + +-- Copy VMware 6.5 Guest OSes to VMware 6.7 +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'VMware', '6.7', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='VMware' AND hypervisor_version='6.5'; +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'VMware', '6.7.1', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='VMware' AND hypervisor_version='6.7'; +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'VMware', '6.7.2', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='VMware' AND hypervisor_version='6.7.1'; +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'VMware', '6.7.3', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='VMware' AND hypervisor_version='6.7.2'; + +-- Copy XenServer 7.1.0 to XenServer 7.1.1 +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '7.1.1', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.1.0'; + +-- Copy XenServer 7.1.1 to XenServer 7.1.2 +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '7.1.2', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.1.1'; + +---- Add New XenServer 7.1.2 Guest OSes +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Debian Stretch 9.0', 289, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Debian Stretch 9.0', 290, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Ubuntu Bionic Beaver 18.04', 279, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'Windows Server 2019 (64-bit)', 276, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 6 (64-bit', 303, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 7', 283, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.2', 'CentOS 7', 284, now(), 0); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 9 (32-bit)', 'Xenserver', '7.1.2', 'Debian Stretch 9.0'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 9 (64-bit)', 'Xenserver', '7.1.2', 'Debian Stretch 9.0'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 18.04', 'Xenserver', '7.1.2', 'Ubuntu Bionic Beaver 18.04'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows Server 2019 (64-bit)', 'Xenserver', '7.1.2', 'Windows Server 2019 (64-bit)'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.9', 'Xenserver', '7.1.2', 'CentOS 6 (64-bit)'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.4', 'Xenserver', '7.1.2', 'CentOS 7'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.5', 'Xenserver', '7.1.2', 'CentOS 7'); + +-- Copy XenServer 7.5 hypervisor guest OS mappings to XenServer 7.6 +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '7.6.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.5.0'; + +---- Add New XenServer 7.6 Guest OSes +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Jessie 8.0', 269, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Jessie 8.0', 270, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Stretch 9.0', 289, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Debian Stretch 9.0', 290, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Xenial Xerus 16.04', 255, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Xenial Xerus 16.04', 256, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.6.0', 'Ubuntu Bionic Beaver 18.04', 279, now(), 0); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 8 (32-bit)', 'Xenserver', '7.6.0', 'Debian Jessie 8.0'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 8 (64-bit)', 'Xenserver', '7.6.0', 'Debian Jessie 8.0'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 9 (32-bit)', 'Xenserver', '7.6.0', 'Debian Jessie 9.0'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 9 (64-bit)', 'Xenserver', '7.6.0', 'Debian Jessie 9.0'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 16.04 (32-bit)', 'Xenserver', '7.6.0', 'Ubuntu Xenial Xerus 16.04'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 16.04 (32-bit)', 'Xenserver', '7.6.0', 'Ubuntu Xenial Xerus 16.04'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 18.04', 'Xenserver', '7.6.0', 'Ubuntu Bionic Beaver 18.04'); + +-- Copy XenServer 7.6 hypervisor guest OS mappings to XenServer8.0 +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '8.0.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.6.0'; + +---- Add New XenServer 8.0 Guest OSes +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows Server 2019 (64-bit)', 'Xenserver', '8.0.0', 'Windows Server 2019 (64-bit)'); + +---- Add Missing KVM Guest OSes +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.6', 262, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.7', 263, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.7', 264, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.8', 302, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.9', 303, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'CentOS 6.10', 304, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.2', 269, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.3', 282, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.4', 283, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.5', 284, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 7.6', 285, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Red Hat Enterprise Linux 8', 286, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 17.04', 277, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 17.10', 278, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 18.04 LTS', 279, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 18.10', 280, now(), 0); +--INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Ubuntu 19.04', 281, now(), 0); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.6 (64-bit)', 'KVM', 'default', 'CentOS 6.6'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.7 (32-bit)', 'KVM', 'default', 'CentOS 6.7'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.7 (64-bit)', 'KVM', 'default', 'CentOS 6.7'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.8', 'KVM', 'default', 'CentOS 6.8'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.9', 'KVM', 'default', 'CentOS 6.9'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS 6.10', 'KVM', 'default', 'CentOS 6.10'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.2', 'KVM', 'default', 'Red Hat Enterprise Linux 7.2'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.3', 'KVM', 'default', 'Red Hat Enterprise Linux 7.3'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.4', 'KVM', 'default', 'Red Hat Enterprise Linux 7.4'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.5', 'KVM', 'default', 'Red Hat Enterprise Linux 7.5'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 7.6', 'KVM', 'default', 'Red Hat Enterprise Linux 7.6'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Red Hat Enterprise Linux 8', 'KVM', 'default', 'Red Hat Enterprise Linux 8'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 17.04', 'KVM', 'default', 'Ubuntu 17.04'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 17.10', 'KVM', 'default', 'Ubuntu 17.10'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 18.04 LTS', 'KVM', 'default', 'Ubuntu 18.04 LTS'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 18.10', 'KVM', 'default', 'Ubuntu 18.10'); +CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Ubuntu 19.04', 'KVM', 'default', 'Ubuntu 19.04'); + +-- DPDK client and server mode support +ALTER TABLE `cloud`.`service_offering_details` CHANGE COLUMN `value` `value` TEXT NOT NULL; + +ALTER TABLE `cloud`.`vpc_offerings` ADD COLUMN `sort_key` int(32) NOT NULL default 0 COMMENT 'sort key used for customising sort method'; + +-- Add `sort_key` column to data_center +ALTER TABLE `cloud`.`data_center` ADD COLUMN `sort_key` INT(32) NOT NULL DEFAULT 0; + +-- Move domain_id to disk offering details and drop the domain_id column +INSERT INTO `cloud`.`disk_offering_details` (offering_id, name, value, display) SELECT id, 'domainid', domain_id, 0 FROM `cloud`.`disk_offering` WHERE domain_id IS NOT NULL AND type='Disk'; +INSERT INTO `cloud`.`service_offering_details` (service_offering_id, name, value, display) SELECT id, 'domainid', domain_id, 0 FROM `cloud`.`disk_offering` WHERE domain_id IS NOT NULL AND type='Service'; + +ALTER TABLE `cloud`.`disk_offering` DROP COLUMN `domain_id`; + +ALTER TABLE `cloud`.`service_offering_details` DROP FOREIGN KEY `fk_service_offering_details__service_offering_id`, DROP KEY `uk_service_offering_id_name`; +ALTER TABLE `cloud`.`service_offering_details` ADD CONSTRAINT `fk_service_offering_details__service_offering_id` FOREIGN KEY (`service_offering_id`) REFERENCES `service_offering`(`id`) ON DELETE CASCADE; + +-- Disk offering with multi-domains and multi-zones +DROP VIEW IF EXISTS `cloud`.`disk_offering_view`; +CREATE VIEW `cloud`.`disk_offering_view` AS + SELECT + `disk_offering`.`id` AS `id`, + `disk_offering`.`uuid` AS `uuid`, + `disk_offering`.`name` AS `name`, + `disk_offering`.`display_text` AS `display_text`, + `disk_offering`.`provisioning_type` AS `provisioning_type`, + `disk_offering`.`disk_size` AS `disk_size`, + `disk_offering`.`min_iops` AS `min_iops`, + `disk_offering`.`max_iops` AS `max_iops`, + `disk_offering`.`created` AS `created`, + `disk_offering`.`tags` AS `tags`, + `disk_offering`.`customized` AS `customized`, + `disk_offering`.`customized_iops` AS `customized_iops`, + `disk_offering`.`removed` AS `removed`, + `disk_offering`.`use_local_storage` AS `use_local_storage`, + `disk_offering`.`system_use` AS `system_use`, + `disk_offering`.`hv_ss_reserve` AS `hv_ss_reserve`, + `disk_offering`.`bytes_read_rate` AS `bytes_read_rate`, + `disk_offering`.`bytes_read_rate_max` AS `bytes_read_rate_max`, + `disk_offering`.`bytes_read_rate_max_length` AS `bytes_read_rate_max_length`, + `disk_offering`.`bytes_write_rate` AS `bytes_write_rate`, + `disk_offering`.`bytes_write_rate_max` AS `bytes_write_rate_max`, + `disk_offering`.`bytes_write_rate_max_length` AS `bytes_write_rate_max_length`, + `disk_offering`.`iops_read_rate` AS `iops_read_rate`, + `disk_offering`.`iops_read_rate_max` AS `iops_read_rate_max`, + `disk_offering`.`iops_read_rate_max_length` AS `iops_read_rate_max_length`, + `disk_offering`.`iops_write_rate` AS `iops_write_rate`, + `disk_offering`.`iops_write_rate_max` AS `iops_write_rate_max`, + `disk_offering`.`iops_write_rate_max_length` AS `iops_write_rate_max_length`, + `disk_offering`.`cache_mode` AS `cache_mode`, + `disk_offering`.`sort_key` AS `sort_key`, + `disk_offering`.`type` AS `type`, + `disk_offering`.`display_offering` AS `display_offering`, + `disk_offering`.`state` AS `state`, + GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id, + GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid, + GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name, + GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path, + GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id, + GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid, + GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name + FROM + `cloud`.`disk_offering` + LEFT JOIN + `cloud`.`disk_offering_details` AS `domain_details` ON `domain_details`.`offering_id` = `disk_offering`.`id` AND `domain_details`.`name`='domainid' + LEFT JOIN + `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`) + LEFT JOIN + `cloud`.`disk_offering_details` AS `zone_details` ON `zone_details`.`offering_id` = `disk_offering`.`id` AND `zone_details`.`name`='zoneid' + LEFT JOIN + `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`) + WHERE + `disk_offering`.`state`='Active' + GROUP BY + `disk_offering`.`id`; + +-- Service offering with multi-domains and multi-zones +DROP VIEW IF EXISTS `cloud`.`service_offering_view`; +CREATE VIEW `cloud`.`service_offering_view` AS + SELECT + `service_offering`.`id` AS `id`, + `disk_offering`.`uuid` AS `uuid`, + `disk_offering`.`name` AS `name`, + `disk_offering`.`display_text` AS `display_text`, + `disk_offering`.`provisioning_type` AS `provisioning_type`, + `disk_offering`.`created` AS `created`, + `disk_offering`.`tags` AS `tags`, + `disk_offering`.`removed` AS `removed`, + `disk_offering`.`use_local_storage` AS `use_local_storage`, + `disk_offering`.`system_use` AS `system_use`, + `disk_offering`.`customized_iops` AS `customized_iops`, + `disk_offering`.`min_iops` AS `min_iops`, + `disk_offering`.`max_iops` AS `max_iops`, + `disk_offering`.`hv_ss_reserve` AS `hv_ss_reserve`, + `disk_offering`.`bytes_read_rate` AS `bytes_read_rate`, + `disk_offering`.`bytes_read_rate_max` AS `bytes_read_rate_max`, + `disk_offering`.`bytes_read_rate_max_length` AS `bytes_read_rate_max_length`, + `disk_offering`.`bytes_write_rate` AS `bytes_write_rate`, + `disk_offering`.`bytes_write_rate_max` AS `bytes_write_rate_max`, + `disk_offering`.`bytes_write_rate_max_length` AS `bytes_write_rate_max_length`, + `disk_offering`.`iops_read_rate` AS `iops_read_rate`, + `disk_offering`.`iops_read_rate_max` AS `iops_read_rate_max`, + `disk_offering`.`iops_read_rate_max_length` AS `iops_read_rate_max_length`, + `disk_offering`.`iops_write_rate` AS `iops_write_rate`, + `disk_offering`.`iops_write_rate_max` AS `iops_write_rate_max`, + `disk_offering`.`iops_write_rate_max_length` AS `iops_write_rate_max_length`, + `disk_offering`.`cache_mode` AS `cache_mode`, + `service_offering`.`cpu` AS `cpu`, + `service_offering`.`speed` AS `speed`, + `service_offering`.`ram_size` AS `ram_size`, + `service_offering`.`nw_rate` AS `nw_rate`, + `service_offering`.`mc_rate` AS `mc_rate`, + `service_offering`.`ha_enabled` AS `ha_enabled`, + `service_offering`.`limit_cpu_use` AS `limit_cpu_use`, + `service_offering`.`host_tag` AS `host_tag`, + `service_offering`.`default_use` AS `default_use`, + `service_offering`.`vm_type` AS `vm_type`, + `service_offering`.`sort_key` AS `sort_key`, + `service_offering`.`is_volatile` AS `is_volatile`, + `service_offering`.`deployment_planner` AS `deployment_planner`, + GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id, + GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid, + GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name, + GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path, + GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id, + GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid, + GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name + FROM + `cloud`.`service_offering` + INNER JOIN + `cloud`.`disk_offering_view` AS `disk_offering` ON service_offering.id = disk_offering.id + LEFT JOIN + `cloud`.`service_offering_details` AS `domain_details` ON `domain_details`.`service_offering_id` = `disk_offering`.`id` AND `domain_details`.`name`='domainid' + LEFT JOIN + `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`) + LEFT JOIN + `cloud`.`service_offering_details` AS `zone_details` ON `zone_details`.`service_offering_id` = `disk_offering`.`id` AND `zone_details`.`name`='zoneid' + LEFT JOIN + `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`) + WHERE + `disk_offering`.`state`='Active' + GROUP BY + `service_offering`.`id`; + +-- Add display column for network offering details table +ALTER TABLE `cloud`.`network_offering_details` ADD COLUMN `display` tinyint(1) NOT NULL DEFAULT '1' COMMENT 'True if the detail can be displayed to the end user'; + +-- Network offering with multi-domains and multi-zones +DROP VIEW IF EXISTS `cloud`.`network_offering_view`; +CREATE VIEW `cloud`.`network_offering_view` AS + SELECT + `network_offerings`.`id` AS `id`, + `network_offerings`.`uuid` AS `uuid`, + `network_offerings`.`name` AS `name`, + `network_offerings`.`unique_name` AS `unique_name`, + `network_offerings`.`display_text` AS `display_text`, + `network_offerings`.`nw_rate` AS `nw_rate`, + `network_offerings`.`mc_rate` AS `mc_rate`, + `network_offerings`.`traffic_type` AS `traffic_type`, + `network_offerings`.`tags` AS `tags`, + `network_offerings`.`system_only` AS `system_only`, + `network_offerings`.`specify_vlan` AS `specify_vlan`, + `network_offerings`.`service_offering_id` AS `service_offering_id`, + `network_offerings`.`conserve_mode` AS `conserve_mode`, + `network_offerings`.`created` AS `created`, + `network_offerings`.`removed` AS `removed`, + `network_offerings`.`default` AS `default`, + `network_offerings`.`availability` AS `availability`, + `network_offerings`.`dedicated_lb_service` AS `dedicated_lb_service`, + `network_offerings`.`shared_source_nat_service` AS `shared_source_nat_service`, + `network_offerings`.`sort_key` AS `sort_key`, + `network_offerings`.`redundant_router_service` AS `redundant_router_service`, + `network_offerings`.`state` AS `state`, + `network_offerings`.`guest_type` AS `guest_type`, + `network_offerings`.`elastic_ip_service` AS `elastic_ip_service`, + `network_offerings`.`eip_associate_public_ip` AS `eip_associate_public_ip`, + `network_offerings`.`elastic_lb_service` AS `elastic_lb_service`, + `network_offerings`.`specify_ip_ranges` AS `specify_ip_ranges`, + `network_offerings`.`inline` AS `inline`, + `network_offerings`.`is_persistent` AS `is_persistent`, + `network_offerings`.`internal_lb` AS `internal_lb`, + `network_offerings`.`public_lb` AS `public_lb`, + `network_offerings`.`egress_default_policy` AS `egress_default_policy`, + `network_offerings`.`concurrent_connections` AS `concurrent_connections`, + `network_offerings`.`keep_alive_enabled` AS `keep_alive_enabled`, + `network_offerings`.`supports_streched_l2` AS `supports_streched_l2`, + `network_offerings`.`supports_public_access` AS `supports_public_access`, + `network_offerings`.`for_vpc` AS `for_vpc`, + `network_offerings`.`service_package_id` AS `service_package_id`, + GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id, + GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid, + GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name, + GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path, + GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id, + GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid, + GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name + FROM + `cloud`.`network_offerings` + LEFT JOIN + `cloud`.`network_offering_details` AS `domain_details` ON `domain_details`.`network_offering_id` = `network_offerings`.`id` AND `domain_details`.`name`='domainid' + LEFT JOIN + `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`) + LEFT JOIN + `cloud`.`network_offering_details` AS `zone_details` ON `zone_details`.`network_offering_id` = `network_offerings`.`id` AND `zone_details`.`name`='zoneid' + LEFT JOIN + `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`) + GROUP BY + `network_offerings`.`id`; + +-- Create VPC offering details table +CREATE TABLE `vpc_offering_details` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `offering_id` bigint(20) unsigned NOT NULL COMMENT 'vpc offering id', + `name` varchar(255) NOT NULL, + `value` varchar(1024) NOT NULL, + `display` tinyint(1) NOT NULL DEFAULT '1' COMMENT 'True if the detail can be displayed to the end user', + PRIMARY KEY (`id`), + KEY `fk_vpc_offering_details__vpc_offering_id` (`offering_id`), + CONSTRAINT `fk_vpc_offering_details__vpc_offering_id` FOREIGN KEY (`offering_id`) REFERENCES `vpc_offerings` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- VPC offering with multi-domains and multi-zones +DROP VIEW IF EXISTS `cloud`.`vpc_offering_view`; +CREATE VIEW `cloud`.`vpc_offering_view` AS + SELECT + `vpc_offerings`.`id` AS `id`, + `vpc_offerings`.`uuid` AS `uuid`, + `vpc_offerings`.`name` AS `name`, + `vpc_offerings`.`unique_name` AS `unique_name`, + `vpc_offerings`.`display_text` AS `display_text`, + `vpc_offerings`.`state` AS `state`, + `vpc_offerings`.`default` AS `default`, + `vpc_offerings`.`created` AS `created`, + `vpc_offerings`.`removed` AS `removed`, + `vpc_offerings`.`service_offering_id` AS `service_offering_id`, + `vpc_offerings`.`supports_distributed_router` AS `supports_distributed_router`, + `vpc_offerings`.`supports_region_level_vpc` AS `supports_region_level_vpc`, + `vpc_offerings`.`redundant_router_service` AS `redundant_router_service`, + `vpc_offerings`.`sort_key` AS `sort_key`, + GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id, + GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid, + GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name, + GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path, + GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id, + GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid, + GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name + FROM + `cloud`.`vpc_offerings` + LEFT JOIN + `cloud`.`vpc_offering_details` AS `domain_details` ON `domain_details`.`offering_id` = `vpc_offerings`.`id` AND `domain_details`.`name`='domainid' + LEFT JOIN + `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`) + LEFT JOIN + `cloud`.`vpc_offering_details` AS `zone_details` ON `zone_details`.`offering_id` = `vpc_offerings`.`id` AND `zone_details`.`name`='zoneid' + LEFT JOIN + `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`) + GROUP BY + `vpc_offerings`.`id`; + +-- Recreate data_center_view +DROP VIEW IF EXISTS `cloud`.`data_center_view`; +CREATE VIEW `cloud`.`data_center_view` AS + select + data_center.id, + data_center.uuid, + data_center.name, + data_center.is_security_group_enabled, + data_center.is_local_storage_enabled, + data_center.description, + data_center.dns1, + data_center.dns2, + data_center.ip6_dns1, + data_center.ip6_dns2, + data_center.internal_dns1, + data_center.internal_dns2, + data_center.guest_network_cidr, + data_center.domain, + data_center.networktype, + data_center.allocation_state, + data_center.zone_token, + data_center.dhcp_provider, + data_center.removed, + data_center.sort_key, + domain.id domain_id, + domain.uuid domain_uuid, + domain.name domain_name, + domain.path domain_path, + dedicated_resources.affinity_group_id, + dedicated_resources.account_id, + affinity_group.uuid affinity_group_uuid + from + `cloud`.`data_center` + left join + `cloud`.`domain` ON data_center.domain_id = domain.id + left join + `cloud`.`dedicated_resources` ON data_center.id = dedicated_resources.data_center_id + left join + `cloud`.`affinity_group` ON dedicated_resources.affinity_group_id = affinity_group.id; + +-- Remove key/value tags from project_view +DROP VIEW IF EXISTS `cloud`.`project_view`; +CREATE VIEW `cloud`.`project_view` AS + select + projects.id, + projects.uuid, + projects.name, + projects.display_text, + projects.state, + projects.removed, + projects.created, + projects.project_account_id, + account.account_name owner, + pacct.account_id, + domain.id domain_id, + domain.uuid domain_uuid, + domain.name domain_name, + domain.path domain_path + from + `cloud`.`projects` + inner join + `cloud`.`domain` ON projects.domain_id = domain.id + inner join + `cloud`.`project_account` ON projects.id = project_account.project_id + and project_account.account_role = 'Admin' + inner join + `cloud`.`account` ON account.id = project_account.account_id + left join + `cloud`.`project_account` pacct ON projects.id = pacct.project_id; + +-- KVM: Add background task to upload certificates for direct download +CREATE TABLE `cloud`.`direct_download_certificate` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `uuid` varchar(40) NOT NULL, + `alias` varchar(255) NOT NULL, + `certificate` text NOT NULL, + `hypervisor_type` varchar(45) NOT NULL, + `zone_id` bigint(20) unsigned NOT NULL, + PRIMARY KEY (`id`), + KEY `i_direct_download_certificate_alias` (`alias`), + KEY `fk_direct_download_certificate__zone_id` (`zone_id`), + CONSTRAINT `fk_direct_download_certificate__zone_id` FOREIGN KEY (`zone_id`) REFERENCES `data_center` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`direct_download_certificate_host_map` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `certificate_id` bigint(20) unsigned NOT NULL, + `host_id` bigint(20) unsigned NOT NULL, + `revoked` int(1) NOT NULL DEFAULT 0, + PRIMARY KEY (`id`), + KEY `fk_direct_download_certificate_host_map__host_id` (`host_id`), + KEY `fk_direct_download_certificate_host_map__certificate_id` (`certificate_id`), + CONSTRAINT `fk_direct_download_certificate_host_map__host_id` FOREIGN KEY (`host_id`) REFERENCES `host` (`id`) ON DELETE CASCADE, + CONSTRAINT `fk_direct_download_certificate_host_map__certificate_id` FOREIGN KEY (`certificate_id`) REFERENCES `direct_download_certificate` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- [Vmware] Allow configuring appliances on the VM instance wizard when OVF properties are available +CREATE TABLE `cloud`.`template_ovf_properties` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `template_id` bigint(20) unsigned NOT NULL, + `key` VARCHAR(100) NOT NULL, + `type` VARCHAR(45) DEFAULT NULL, + `value` VARCHAR(100) DEFAULT NULL, + `password` TINYINT(1) NOT NULL DEFAULT '0', + `qualifiers` TEXT DEFAULT NULL, + `user_configurable` TINYINT(1) NOT NULL DEFAULT '0', + `label` TEXT DEFAULT NULL, + `description` TEXT DEFAULT NULL, + PRIMARY KEY (`id`), + CONSTRAINT `fk_template_ovf_properties__template_id` FOREIGN KEY (`template_id`) REFERENCES `vm_template`(`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- Add VM snapshot ID on usage helper tables +ALTER TABLE `cloud_usage`.`usage_vmsnapshot` ADD COLUMN `vm_snapshot_id` BIGINT(20) NULL DEFAULT NULL AFTER `processed`; +ALTER TABLE `cloud_usage`.`usage_snapshot_on_primary` ADD COLUMN `vm_snapshot_id` BIGINT(20) NULL DEFAULT NULL AFTER `deleted`; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41202to41203-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41202to41203-cleanup.sql new file mode 100644 index 000000000000..2e6817e906b2 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41202to41203-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.12.0.2 to 4.12.0.3 +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41202to41203.sql b/engine/schema/src/main/resources/META-INF/db/schema-41202to41203.sql new file mode 100644 index 000000000000..d0697a2e4161 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41202to41203.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.12.0.2 to 4.12.0.3 +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41203to41204-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41203to41204-cleanup.sql new file mode 100644 index 000000000000..f330ef6e0a87 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41203to41204-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.12.0.3 to 4.12.0.4 +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41203to41204.sql b/engine/schema/src/main/resources/META-INF/db/schema-41203to41204.sql new file mode 100644 index 000000000000..49d2a3454de1 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41203to41204.sql @@ -0,0 +1,23 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.12.0.3 to 4.12.0.4 +--; +INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (283, UUID(), 10, 'Citrix ADC VPX', utc_timestamp()); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.1.2', 'Other install media', 283, utc_timestamp(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'Xenserver', '7.6.0', 'Other install media', 283, utc_timestamp(), 0); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41204to41205-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41204to41205-cleanup.sql new file mode 100644 index 000000000000..94a5a4613805 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41204to41205-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.12.0.4 to 4.12.0.5 +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41204to41205.sql b/engine/schema/src/main/resources/META-INF/db/schema-41204to41205.sql new file mode 100644 index 000000000000..e90cb53897b8 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41204to41205.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.12.0.4 to 4.12.0.5 +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41205to41206-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41205to41206-cleanup.sql new file mode 100644 index 000000000000..7bb083d3bd6e --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41205to41206-cleanup.sql @@ -0,0 +1,50 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.12.0.5 to 4.12.0.6 +--; + +-- Remove key/value tags from project_view +DROP VIEW IF EXISTS `cloud`.`project_view`; +CREATE VIEW `cloud`.`project_view` AS + select + projects.id, + projects.uuid, + projects.name, + projects.display_text, + projects.state, + projects.removed, + projects.created, + projects.project_account_id, + account.account_name owner, + pacct.account_id, + domain.id domain_id, + domain.uuid domain_uuid, + domain.name domain_name, + domain.path domain_path + from + `cloud`.`projects` + inner join + `cloud`.`domain` ON projects.domain_id = domain.id + inner join + `cloud`.`project_account` ON projects.id = project_account.project_id + and project_account.account_role = 'Admin' + inner join + `cloud`.`account` ON account.id = project_account.account_id + left join + `cloud`.`project_account` pacct ON projects.id = pacct.project_id; \ No newline at end of file diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41205to41206.sql b/engine/schema/src/main/resources/META-INF/db/schema-41205to41206.sql new file mode 100644 index 000000000000..5f11dee9f852 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41205to41206.sql @@ -0,0 +1,38 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.12.0.5 to 4.12.0.6 +--; + +-- Add XenServer 8.1 hypervisor capabilities +INSERT INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, storage_motion_supported) +values (UUID(), 'Citrix Hypervisor', '8.0.0', 500, 13, 1); +INSERT INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, storage_motion_supported) +values (UUID(), 'Citrix Hypervisor', '8.1.0', 500, 13, 1); + +-- Copy XenServer 7.6 hypervisor guest OS mappings to XenServer 8.0 +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) +SELECT UUID(),'Citrix Hypervisor', '8.0.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.6.0'; + +-- Add New XenServer 8.0 Guest OSes +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) +VALUES (UUID(), 'Citrix Hypervisor', '8.0.0', 'Windows Server 2019 (64-bit)', 276, now(), 0); + +-- Copy XenServer 8.0 hypervisor guest OS mappings to XenServer 8.1 +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) +SELECT UUID(),'Citrix Hypervisor', '8.1.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Citrix Hypervisor' AND hypervisor_version='8.0.0'; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41206to41207-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41206to41207-cleanup.sql new file mode 100644 index 000000000000..95473c23789c --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41206to41207-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.12.0.6 to 4.12.0.7 +--; \ No newline at end of file diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41206to41207.sql b/engine/schema/src/main/resources/META-INF/db/schema-41206to41207.sql new file mode 100644 index 000000000000..204f6af8919c --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41206to41207.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.12.0.6 to 4.12.0.7 +--; \ No newline at end of file diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41207to41208-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41207to41208-cleanup.sql new file mode 100644 index 000000000000..95473c23789c --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41207to41208-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.12.0.6 to 4.12.0.7 +--; \ No newline at end of file diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41207to41208.sql b/engine/schema/src/main/resources/META-INF/db/schema-41207to41208.sql new file mode 100644 index 000000000000..204f6af8919c --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41207to41208.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.12.0.6 to 4.12.0.7 +--; \ No newline at end of file diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41208to41209-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41208to41209-cleanup.sql new file mode 100644 index 000000000000..95473c23789c --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41208to41209-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.12.0.6 to 4.12.0.7 +--; \ No newline at end of file diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41208to41209.sql b/engine/schema/src/main/resources/META-INF/db/schema-41208to41209.sql new file mode 100644 index 000000000000..204f6af8919c --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41208to41209.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.12.0.6 to 4.12.0.7 +--; \ No newline at end of file diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41209to412010-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41209to412010-cleanup.sql new file mode 100644 index 000000000000..ed9aba697ec3 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41209to412010-cleanup.sql @@ -0,0 +1,127 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.12.0.6 to 4.12.0.7 +--; + +-- Changes to template_view +DROP VIEW IF EXISTS `cloud`.`template_view`; +CREATE VIEW `cloud`.`template_view` AS + SELECT + `vm_template`.`id` AS `id`, + `vm_template`.`uuid` AS `uuid`, + `vm_template`.`unique_name` AS `unique_name`, + `vm_template`.`name` AS `name`, + `vm_template`.`public` AS `public`, + `vm_template`.`featured` AS `featured`, + `vm_template`.`type` AS `type`, + `vm_template`.`hvm` AS `hvm`, + `vm_template`.`boot_filename` AS `boot_filename`, + `vm_template`.`bits` AS `bits`, + `vm_template`.`url` AS `url`, + `vm_template`.`format` AS `format`, + `vm_template`.`created` AS `created`, + `vm_template`.`checksum` AS `checksum`, + `vm_template`.`display_text` AS `display_text`, + `vm_template`.`enable_password` AS `enable_password`, + `vm_template`.`dynamically_scalable` AS `dynamically_scalable`, + `vm_template`.`state` AS `template_state`, + `vm_template`.`guest_os_id` AS `guest_os_id`, + `guest_os`.`uuid` AS `guest_os_uuid`, + `guest_os`.`display_name` AS `guest_os_name`, + `vm_template`.`bootable` AS `bootable`, + `vm_template`.`prepopulate` AS `prepopulate`, + `vm_template`.`cross_zones` AS `cross_zones`, + `vm_template`.`hypervisor_type` AS `hypervisor_type`, + `vm_template`.`extractable` AS `extractable`, + `vm_template`.`template_tag` AS `template_tag`, + `vm_template`.`sort_key` AS `sort_key`, + `vm_template`.`removed` AS `removed`, + `vm_template`.`enable_sshkey` AS `enable_sshkey`, + `parent_template`.`id` AS `parent_template_id`, + `parent_template`.`uuid` AS `parent_template_uuid`, + `source_template`.`id` AS `source_template_id`, + `source_template`.`uuid` AS `source_template_uuid`, + `account`.`id` AS `account_id`, + `account`.`uuid` AS `account_uuid`, + `account`.`account_name` AS `account_name`, + `account`.`type` AS `account_type`, + `domain`.`id` AS `domain_id`, + `domain`.`uuid` AS `domain_uuid`, + `domain`.`name` AS `domain_name`, + `domain`.`path` AS `domain_path`, + `projects`.`id` AS `project_id`, + `projects`.`uuid` AS `project_uuid`, + `projects`.`name` AS `project_name`, + `data_center`.`id` AS `data_center_id`, + `data_center`.`uuid` AS `data_center_uuid`, + `data_center`.`name` AS `data_center_name`, + `launch_permission`.`account_id` AS `lp_account_id`, + `template_store_ref`.`store_id` AS `store_id`, + `image_store`.`scope` AS `store_scope`, + `template_store_ref`.`state` AS `state`, + `template_store_ref`.`download_state` AS `download_state`, + `template_store_ref`.`download_pct` AS `download_pct`, + `template_store_ref`.`error_str` AS `error_str`, + `template_store_ref`.`size` AS `size`, + `template_store_ref`.physical_size AS `physical_size`, + `template_store_ref`.`destroyed` AS `destroyed`, + `template_store_ref`.`created` AS `created_on_store`, + `vm_template_details`.`name` AS `detail_name`, + `vm_template_details`.`value` AS `detail_value`, + `resource_tags`.`id` AS `tag_id`, + `resource_tags`.`uuid` AS `tag_uuid`, + `resource_tags`.`key` AS `tag_key`, + `resource_tags`.`value` AS `tag_value`, + `resource_tags`.`domain_id` AS `tag_domain_id`, + `domain`.`uuid` AS `tag_domain_uuid`, + `domain`.`name` AS `tag_domain_name`, + `resource_tags`.`account_id` AS `tag_account_id`, + `account`.`account_name` AS `tag_account_name`, + `resource_tags`.`resource_id` AS `tag_resource_id`, + `resource_tags`.`resource_uuid` AS `tag_resource_uuid`, + `resource_tags`.`resource_type` AS `tag_resource_type`, + `resource_tags`.`customer` AS `tag_customer`, + CONCAT(`vm_template`.`id`, + '_', + IFNULL(`data_center`.`id`, 0)) AS `temp_zone_pair`, + `vm_template`.`direct_download` AS `direct_download` + FROM + (((((((((((((`vm_template` + JOIN `guest_os` ON ((`guest_os`.`id` = `vm_template`.`guest_os_id`))) + JOIN `account` ON ((`account`.`id` = `vm_template`.`account_id`))) + JOIN `domain` ON ((`domain`.`id` = `account`.`domain_id`))) + LEFT JOIN `projects` ON ((`projects`.`project_account_id` = `account`.`id`))) + LEFT JOIN `vm_template_details` ON ((`vm_template_details`.`template_id` = `vm_template`.`id`))) + LEFT JOIN `vm_template` `source_template` ON ((`source_template`.`id` = `vm_template`.`source_template_id`))) + LEFT JOIN `template_store_ref` ON (((`template_store_ref`.`template_id` = `vm_template`.`id`) + AND (`template_store_ref`.`store_role` = 'Image') + AND (`template_store_ref`.`destroyed` = 0)))) + LEFT JOIN `vm_template` `parent_template` ON ((`parent_template`.`id` = `vm_template`.`parent_template_id`))) + LEFT JOIN `image_store` ON ((ISNULL(`image_store`.`removed`) + AND (`template_store_ref`.`store_id` IS NOT NULL) + AND (`image_store`.`id` = `template_store_ref`.`store_id`)))) + LEFT JOIN `template_zone_ref` ON (((`template_zone_ref`.`template_id` = `vm_template`.`id`) + AND ISNULL(`template_store_ref`.`store_id`) + AND ISNULL(`template_zone_ref`.`removed`)))) + LEFT JOIN `data_center` ON (((`image_store`.`data_center_id` = `data_center`.`id`) + OR (`template_zone_ref`.`zone_id` = `data_center`.`id`)))) + LEFT JOIN `launch_permission` ON ((`launch_permission`.`template_id` = `vm_template`.`id`))) + LEFT JOIN `resource_tags` ON (((`resource_tags`.`resource_id` = `vm_template`.`id`) + AND ((`resource_tags`.`resource_type` = 'Template') + OR (`resource_tags`.`resource_type` = 'ISO'))))); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41209to412010.sql b/engine/schema/src/main/resources/META-INF/db/schema-41209to412010.sql new file mode 100644 index 000000000000..57b53e4de5af --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41209to412010.sql @@ -0,0 +1,32 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.12.0.6 to 4.12.0.7 +--; + +-- Adding Pxe Boot variables +ALTER TABLE `cloud`.`vm_template` ADD `boot_filename` varchar(255) NULL default NULL COMMENT 'the url where the template exists externally'; +ALTER TABLE `cloud`.`vpc` ADD `network_boot_ip` char(40) NULL default NULL COMMENT 'Network Boot Ip'; + +-- Add XenServer 8.2 hypervisor capabilities +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, storage_motion_supported) +values (UUID(), 'Citrix Hypervisor', '8.2.0', 500, 13, 1); + +-- Copy XenServer 8.1 hypervisor guest OS mappings to XenServer 8.2 +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) +SELECT UUID(),'Citrix Hypervisor', '8.2.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Citrix Hypervisor' AND hypervisor_version='8.1.0'; \ No newline at end of file diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41210to412011-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41210to412011-cleanup.sql new file mode 100644 index 000000000000..95473c23789c --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41210to412011-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.12.0.6 to 4.12.0.7 +--; \ No newline at end of file diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41210to412011.sql b/engine/schema/src/main/resources/META-INF/db/schema-41210to412011.sql new file mode 100644 index 000000000000..204f6af8919c --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41210to412011.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.12.0.6 to 4.12.0.7 +--; \ No newline at end of file diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41211to412012-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41211to412012-cleanup.sql new file mode 100644 index 000000000000..2fd0e630fde2 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41211to412012-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.12.0.11 to 4.12.0.12 +--; \ No newline at end of file diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41211to412012.sql b/engine/schema/src/main/resources/META-INF/db/schema-41211to412012.sql new file mode 100644 index 000000000000..735dc776fcfc --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41211to412012.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.12.0.11 to 4.12.0.12 +--; \ No newline at end of file diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41212to412013-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41212to412013-cleanup.sql new file mode 100644 index 000000000000..2fd0e630fde2 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41212to412013-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.12.0.11 to 4.12.0.12 +--; \ No newline at end of file diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41212to412013.sql b/engine/schema/src/main/resources/META-INF/db/schema-41212to412013.sql new file mode 100644 index 000000000000..735dc776fcfc --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-41212to412013.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.12.0.11 to 4.12.0.12 +--; \ No newline at end of file diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41710to41800.sql b/engine/schema/src/main/resources/META-INF/db/schema-41710to41800.sql index 4ec812cc1c0a..93b7669482b7 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41710to41800.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41710to41800.sql @@ -27,3 +27,274 @@ WHERE so.default_use = 1 AND so.vm_type IN ('domainrouter', 'secondarystoragevm' -- Add cidr_list column to load_balancing_rules ALTER TABLE `cloud`.`load_balancing_rules` ADD cidr_list VARCHAR(4096); + +-- Fixes for custom schema changes +DROP VIEW IF EXISTS `cloud`.`template_view`; +CREATE VIEW `cloud`.`template_view` AS + SELECT + `vm_template`.`id` AS `id`, + `vm_template`.`uuid` AS `uuid`, + `vm_template`.`unique_name` AS `unique_name`, + `vm_template`.`name` AS `name`, + `vm_template`.`public` AS `public`, + `vm_template`.`featured` AS `featured`, + `vm_template`.`type` AS `type`, + `vm_template`.`hvm` AS `hvm`, + `vm_template`.`bits` AS `bits`, + `vm_template`.`url` AS `url`, + `vm_template`.`format` AS `format`, + `vm_template`.`created` AS `created`, + `vm_template`.`checksum` AS `checksum`, + `vm_template`.`display_text` AS `display_text`, + `vm_template`.`enable_password` AS `enable_password`, + `vm_template`.`dynamically_scalable` AS `dynamically_scalable`, + `vm_template`.`state` AS `template_state`, + `vm_template`.`guest_os_id` AS `guest_os_id`, + `guest_os`.`uuid` AS `guest_os_uuid`, + `guest_os`.`display_name` AS `guest_os_name`, + `vm_template`.`bootable` AS `bootable`, + `vm_template`.`prepopulate` AS `prepopulate`, + `vm_template`.`cross_zones` AS `cross_zones`, + `vm_template`.`hypervisor_type` AS `hypervisor_type`, + `vm_template`.`extractable` AS `extractable`, + `vm_template`.`template_tag` AS `template_tag`, + `vm_template`.`sort_key` AS `sort_key`, + `vm_template`.`removed` AS `removed`, + `vm_template`.`enable_sshkey` AS `enable_sshkey`, + `vm_template`.`boot_filename` AS `boot_filename`, + `parent_template`.`id` AS `parent_template_id`, + `parent_template`.`uuid` AS `parent_template_uuid`, + `source_template`.`id` AS `source_template_id`, + `source_template`.`uuid` AS `source_template_uuid`, + `account`.`id` AS `account_id`, + `account`.`uuid` AS `account_uuid`, + `account`.`account_name` AS `account_name`, + `account`.`type` AS `account_type`, + `domain`.`id` AS `domain_id`, + `domain`.`uuid` AS `domain_uuid`, + `domain`.`name` AS `domain_name`, + `domain`.`path` AS `domain_path`, + `projects`.`id` AS `project_id`, + `projects`.`uuid` AS `project_uuid`, + `projects`.`name` AS `project_name`, + `data_center`.`id` AS `data_center_id`, + `data_center`.`uuid` AS `data_center_uuid`, + `data_center`.`name` AS `data_center_name`, + `launch_permission`.`account_id` AS `lp_account_id`, + `template_store_ref`.`store_id` AS `store_id`, + `image_store`.`scope` AS `store_scope`, + `template_store_ref`.`state` AS `state`, + `template_store_ref`.`download_state` AS `download_state`, + `template_store_ref`.`download_pct` AS `download_pct`, + `template_store_ref`.`error_str` AS `error_str`, + `template_store_ref`.`size` AS `size`, + `template_store_ref`.physical_size AS `physical_size`, + `template_store_ref`.`destroyed` AS `destroyed`, + `template_store_ref`.`created` AS `created_on_store`, + `vm_template_details`.`name` AS `detail_name`, + `vm_template_details`.`value` AS `detail_value`, + `resource_tags`.`id` AS `tag_id`, + `resource_tags`.`uuid` AS `tag_uuid`, + `resource_tags`.`key` AS `tag_key`, + `resource_tags`.`value` AS `tag_value`, + `resource_tags`.`domain_id` AS `tag_domain_id`, + `domain`.`uuid` AS `tag_domain_uuid`, + `domain`.`name` AS `tag_domain_name`, + `resource_tags`.`account_id` AS `tag_account_id`, + `account`.`account_name` AS `tag_account_name`, + `resource_tags`.`resource_id` AS `tag_resource_id`, + `resource_tags`.`resource_uuid` AS `tag_resource_uuid`, + `resource_tags`.`resource_type` AS `tag_resource_type`, + `resource_tags`.`customer` AS `tag_customer`, + CONCAT(`vm_template`.`id`, + '_', + IFNULL(`data_center`.`id`, 0)) AS `temp_zone_pair`, + `vm_template`.`direct_download` AS `direct_download`, + `vm_template`.`deploy_as_is` AS `deploy_as_is` + FROM + (((((((((((((`vm_template` + JOIN `guest_os` ON ((`guest_os`.`id` = `vm_template`.`guest_os_id`))) + JOIN `account` ON ((`account`.`id` = `vm_template`.`account_id`))) + JOIN `domain` ON ((`domain`.`id` = `account`.`domain_id`))) + LEFT JOIN `projects` ON ((`projects`.`project_account_id` = `account`.`id`))) + LEFT JOIN `vm_template_details` ON ((`vm_template_details`.`template_id` = `vm_template`.`id`))) + LEFT JOIN `vm_template` `source_template` ON ((`source_template`.`id` = `vm_template`.`source_template_id`))) + LEFT JOIN `template_store_ref` ON (((`template_store_ref`.`template_id` = `vm_template`.`id`) + AND (`template_store_ref`.`store_role` = 'Image') + AND (`template_store_ref`.`destroyed` = 0)))) + LEFT JOIN `vm_template` `parent_template` ON ((`parent_template`.`id` = `vm_template`.`parent_template_id`))) + LEFT JOIN `image_store` ON ((ISNULL(`image_store`.`removed`) + AND (`template_store_ref`.`store_id` IS NOT NULL) + AND (`image_store`.`id` = `template_store_ref`.`store_id`)))) + LEFT JOIN `template_zone_ref` ON (((`template_zone_ref`.`template_id` = `vm_template`.`id`) + AND ISNULL(`template_store_ref`.`store_id`) + AND ISNULL(`template_zone_ref`.`removed`)))) + LEFT JOIN `data_center` ON (((`image_store`.`data_center_id` = `data_center`.`id`) + OR (`template_zone_ref`.`zone_id` = `data_center`.`id`)))) + LEFT JOIN `launch_permission` ON ((`launch_permission`.`template_id` = `vm_template`.`id`))) + LEFT JOIN `resource_tags` ON (((`resource_tags`.`resource_id` = `vm_template`.`id`) + AND ((`resource_tags`.`resource_type` = 'Template') + OR (`resource_tags`.`resource_type` = 'ISO'))))); + +DROP VIEW IF EXISTS `cloud`.`disk_offering_view`; +CREATE VIEW `cloud`.`disk_offering_view` AS + SELECT + `disk_offering`.`id` AS `id`, + `disk_offering`.`uuid` AS `uuid`, + `disk_offering`.`name` AS `name`, + `disk_offering`.`display_text` AS `display_text`, + `disk_offering`.`provisioning_type` AS `provisioning_type`, + `disk_offering`.`disk_size` AS `disk_size`, + `disk_offering`.`min_iops` AS `min_iops`, + `disk_offering`.`max_iops` AS `max_iops`, + `disk_offering`.`created` AS `created`, + `disk_offering`.`tags` AS `tags`, + `disk_offering`.`customized` AS `customized`, + `disk_offering`.`customized_iops` AS `customized_iops`, + `disk_offering`.`removed` AS `removed`, + `disk_offering`.`use_local_storage` AS `use_local_storage`, + `disk_offering`.`hv_ss_reserve` AS `hv_ss_reserve`, + `disk_offering`.`bytes_read_rate` AS `bytes_read_rate`, + `disk_offering`.`bytes_read_rate_max` AS `bytes_read_rate_max`, + `disk_offering`.`bytes_read_rate_max_length` AS `bytes_read_rate_max_length`, + `disk_offering`.`bytes_write_rate` AS `bytes_write_rate`, + `disk_offering`.`bytes_write_rate_max` AS `bytes_write_rate_max`, + `disk_offering`.`bytes_write_rate_max_length` AS `bytes_write_rate_max_length`, + `disk_offering`.`iops_read_rate` AS `iops_read_rate`, + `disk_offering`.`iops_read_rate_max` AS `iops_read_rate_max`, + `disk_offering`.`iops_read_rate_max_length` AS `iops_read_rate_max_length`, + `disk_offering`.`iops_write_rate` AS `iops_write_rate`, + `disk_offering`.`iops_write_rate_max` AS `iops_write_rate_max`, + `disk_offering`.`iops_write_rate_max_length` AS `iops_write_rate_max_length`, + `disk_offering`.`cache_mode` AS `cache_mode`, + `disk_offering`.`sort_key` AS `sort_key`, + `disk_offering`.`compute_only` AS `compute_only`, + `disk_offering`.`display_offering` AS `display_offering`, + `disk_offering`.`state` AS `state`, + `disk_offering`.`disk_size_strictness` AS `disk_size_strictness`, + `disk_offering`.`min_iops_per_gb` AS `min_iops_per_gb`, + `disk_offering`.`max_iops_per_gb` AS `max_iops_per_gb`, + `disk_offering`.`highest_min_iops` AS `highest_min_iops`, + `disk_offering`.`highest_max_iops` AS `highest_max_iops`, + `vsphere_storage_policy`.`value` AS `vsphere_storage_policy`, + GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id, + GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid, + GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name, + GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path, + GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id, + GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid, + GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name + FROM + `cloud`.`disk_offering` + LEFT JOIN + `cloud`.`disk_offering_details` AS `domain_details` ON `domain_details`.`offering_id` = `disk_offering`.`id` AND `domain_details`.`name`='domainid' + LEFT JOIN + `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`) + LEFT JOIN + `cloud`.`disk_offering_details` AS `zone_details` ON `zone_details`.`offering_id` = `disk_offering`.`id` AND `zone_details`.`name`='zoneid' + LEFT JOIN + `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`) + LEFT JOIN + `cloud`.`disk_offering_details` AS `vsphere_storage_policy` ON `vsphere_storage_policy`.`offering_id` = `disk_offering`.`id` AND `vsphere_storage_policy`.`name` = 'storagepolicy' + WHERE + `disk_offering`.`state`='Active' + GROUP BY + `disk_offering`.`id`; + +DROP VIEW IF EXISTS `cloud`.`service_offering_view`; +CREATE VIEW `cloud`.`service_offering_view` AS + SELECT + `service_offering`.`id` AS `id`, + `service_offering`.`uuid` AS `uuid`, + `service_offering`.`name` AS `name`, + `service_offering`.`display_text` AS `display_text`, + `disk_offering`.`provisioning_type` AS `provisioning_type`, + `service_offering`.`created` AS `created`, + `disk_offering`.`tags` AS `tags`, + `service_offering`.`removed` AS `removed`, + `disk_offering`.`use_local_storage` AS `use_local_storage`, + `service_offering`.`system_use` AS `system_use`, + `disk_offering`.`id` AS `disk_offering_id`, + `disk_offering`.`name` AS `disk_offering_name`, + `disk_offering`.`uuid` AS `disk_offering_uuid`, + `disk_offering`.`display_text` AS `disk_offering_display_text`, + `disk_offering`.`customized_iops` AS `customized_iops`, + `disk_offering`.`min_iops` AS `min_iops`, + `disk_offering`.`max_iops` AS `max_iops`, + `disk_offering`.`hv_ss_reserve` AS `hv_ss_reserve`, + `disk_offering`.`bytes_read_rate` AS `bytes_read_rate`, + `disk_offering`.`bytes_read_rate_max` AS `bytes_read_rate_max`, + `disk_offering`.`bytes_read_rate_max_length` AS `bytes_read_rate_max_length`, + `disk_offering`.`bytes_write_rate` AS `bytes_write_rate`, + `disk_offering`.`bytes_write_rate_max` AS `bytes_write_rate_max`, + `disk_offering`.`bytes_write_rate_max_length` AS `bytes_write_rate_max_length`, + `disk_offering`.`iops_read_rate` AS `iops_read_rate`, + `disk_offering`.`iops_read_rate_max` AS `iops_read_rate_max`, + `disk_offering`.`iops_read_rate_max_length` AS `iops_read_rate_max_length`, + `disk_offering`.`iops_write_rate` AS `iops_write_rate`, + `disk_offering`.`iops_write_rate_max` AS `iops_write_rate_max`, + `disk_offering`.`iops_write_rate_max_length` AS `iops_write_rate_max_length`, + `disk_offering`.`cache_mode` AS `cache_mode`, + `disk_offering`.`disk_size` AS `root_disk_size`, + `disk_offering`.`min_iops_per_gb` AS `min_iops_per_gb`, + `disk_offering`.`max_iops_per_gb` AS `max_iops_per_gb`, + `disk_offering`.`highest_min_iops` AS `highest_min_iops`, + `disk_offering`.`highest_max_iops` AS `highest_max_iops`, + `service_offering`.`cpu` AS `cpu`, + `service_offering`.`speed` AS `speed`, + `service_offering`.`ram_size` AS `ram_size`, + `service_offering`.`nw_rate` AS `nw_rate`, + `service_offering`.`mc_rate` AS `mc_rate`, + `service_offering`.`ha_enabled` AS `ha_enabled`, + `service_offering`.`limit_cpu_use` AS `limit_cpu_use`, + `service_offering`.`host_tag` AS `host_tag`, + `service_offering`.`default_use` AS `default_use`, + `service_offering`.`vm_type` AS `vm_type`, + `service_offering`.`sort_key` AS `sort_key`, + `service_offering`.`is_volatile` AS `is_volatile`, + `service_offering`.`deployment_planner` AS `deployment_planner`, + `service_offering`.`dynamic_scaling_enabled` AS `dynamic_scaling_enabled`, + `service_offering`.`disk_offering_strictness` AS `disk_offering_strictness`, + `vsphere_storage_policy`.`value` AS `vsphere_storage_policy`, + GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id, + GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid, + GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name, + GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path, + GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id, + GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid, + GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name, + IFNULL(`min_compute_details`.`value`, `cpu`) AS min_cpu, + IFNULL(`max_compute_details`.`value`, `cpu`) AS max_cpu, + IFNULL(`min_memory_details`.`value`, `ram_size`) AS min_memory, + IFNULL(`max_memory_details`.`value`, `ram_size`) AS max_memory + FROM + `cloud`.`service_offering` + INNER JOIN + `cloud`.`disk_offering_view` AS `disk_offering` ON service_offering.disk_offering_id = disk_offering.id + LEFT JOIN + `cloud`.`service_offering_details` AS `domain_details` ON `domain_details`.`service_offering_id` = `service_offering`.`id` AND `domain_details`.`name`='domainid' + LEFT JOIN + `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`) + LEFT JOIN + `cloud`.`service_offering_details` AS `zone_details` ON `zone_details`.`service_offering_id` = `service_offering`.`id` AND `zone_details`.`name`='zoneid' + LEFT JOIN + `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`) + LEFT JOIN + `cloud`.`service_offering_details` AS `min_compute_details` ON `min_compute_details`.`service_offering_id` = `service_offering`.`id` + AND `min_compute_details`.`name` = 'mincpunumber' + LEFT JOIN + `cloud`.`service_offering_details` AS `max_compute_details` ON `max_compute_details`.`service_offering_id` = `service_offering`.`id` + AND `max_compute_details`.`name` = 'maxcpunumber' + LEFT JOIN + `cloud`.`service_offering_details` AS `min_memory_details` ON `min_memory_details`.`service_offering_id` = `service_offering`.`id` + AND `min_memory_details`.`name` = 'minmemory' + LEFT JOIN + `cloud`.`service_offering_details` AS `max_memory_details` ON `max_memory_details`.`service_offering_id` = `service_offering`.`id` + AND `max_memory_details`.`name` = 'maxmemory' + LEFT JOIN + `cloud`.`service_offering_details` AS `vsphere_storage_policy` ON `vsphere_storage_policy`.`service_offering_id` = `service_offering`.`id` + AND `vsphere_storage_policy`.`name` = 'storagepolicy' + WHERE + `service_offering`.`state`='Active' + GROUP BY + `service_offering`.`id`; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4930to41000.sql b/engine/schema/src/main/resources/META-INF/db/schema-4930to41000.sql index dc0cd6d4d75a..b5eb8d361250 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-4930to41000.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-4930to41000.sql @@ -49,7 +49,7 @@ INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervi INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.0', 'windows9_64Guest', 258, now(), 0); INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Windows 10', 258, now(), 0); -INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '6.5.0', 'Windows Server 2016 (64-bit)', 259, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '6.5.0', 'Other install media', 259, now(), 0); INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.0.0', 'Windows Server 2016 (64-bit)', 259, now(), 0); INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'VMware', '6.0', 'windows9Server64Guest', 259, now(), 0); INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'KVM', 'default', 'Windows Server 2016', 259, now(), 0); @@ -235,6 +235,205 @@ WHERE (o.cpu is null AND o.speed IS NULL AND o.ram_size IS NULL) AND -- CLOUDSTACK-9827: Storage tags stored in multiple places DROP VIEW IF EXISTS `cloud`.`storage_tag_view`; +-- XenServer 7.1 support update +INSERT INTO `cloud`.`hypervisor_capabilities`( + uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, storage_motion_supported) +values + (UUID(), 'XenServer', '7.1.0', 500, 13, 1); + +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 4.5 (32-bit)', 1, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 4.6 (32-bit)', 2, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 4.7 (32-bit)', 3, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 4.8 (32-bit)', 4, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 5, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 6, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 7, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 8, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 9, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 10, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 11, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 12, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 13, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 14, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 111, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 112, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 141, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 142, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 161, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 162, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 173, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 174, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 175, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 176, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 231, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 232, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (32-bit)', 139, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 5 (64-bit)', 140, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (32-bit)', 143, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (64-bit)', 144, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (32-bit)', 177, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (64-bit)', 178, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (32-bit)', 179, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (64-bit)', 180, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (32-bit)', 171, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (64-bit)', 172, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (32-bit)', 181, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (64-bit)', 182, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (32-bit)', 227, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (64-bit)', 228, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (32-bit)', 248, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (64-bit)', 249, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 7', 246, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Debian Squeeze 6.0 (32-bit)', 132, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Debian Squeeze 6.0 (64-bit)', 133, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Debian Wheezy 7.0 (32-bit)', 183, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Debian Wheezy 7.0 (64-bit)', 184, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 16, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 17, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 18, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 19, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 20, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 21, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 22, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 23, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 24, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 25, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 134, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 135, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 145, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 146, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 207, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 208, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 209, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 210, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 211, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 212, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (32-bit)', 233, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 5 (64-bit)', 234, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (32-bit)', 147, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (64-bit)', 148, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (32-bit)', 213, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (64-bit)', 214, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (32-bit)', 215, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (64-bit)', 216, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (32-bit)', 217, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (64-bit)', 218, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (32-bit)', 219, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (64-bit)', 220, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (32-bit)', 235, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (64-bit)', 236, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (32-bit)', 250, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Enterprise Linux 6 (64-bit)', 251, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Linux 7', 247, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 4.5 (32-bit)', 26, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 4.6 (32-bit)', 27, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 4.7 (32-bit)', 28, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 4.8 (32-bit)', 29, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 30, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 31, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 32, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 33, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 34, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 35, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 36, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 37, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 38, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 39, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 113, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 114, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 149, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 150, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 189, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 190, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 191, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 192, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 193, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 194, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (32-bit)', 237, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 5 (64-bit)', 238, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (32-bit)', 136, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (64-bit)', 137, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (32-bit)', 195, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (64-bit)', 196, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (32-bit)', 197, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (64-bit)', 198, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (32-bit)', 199, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (64-bit)', 204, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (32-bit)', 205, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (64-bit)', 206, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (32-bit)', 239, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (64-bit)', 240, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 7', 245, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 10 SP1 (32-bit)', 41, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 10 SP1 (64-bit)', 42, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 10 SP2 (32-bit)', 43, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 10 SP2 (64-bit)', 44, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 10 SP3 (32-bit)', 151, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 10 SP3 (64-bit)', 45, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 10 SP4 (32-bit)', 153, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 10 SP4 (64-bit)', 152, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 11 (32-bit)', 46, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 11 (64-bit)', 47, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 11 SP1 (32-bit)', 155, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 11 SP2 (32-bit)', 186, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 11 SP2 (64-bit)', 185, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 11 SP3 (32-bit)', 188, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 11 SP3 (32-bit)', 187, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 12 (64-bit)', 244, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows 7 (32-bit)', 48, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows 7 (64-bit)', 49, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows 8 (32-bit)', 165, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows 8 (64-bit)', 166, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 51, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 87, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 88, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 89, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 90, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows Server 2008 (32-bit)', 52, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows Server 2008 (64-bit)', 53, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows Server 2008 R2 (64-bit)', 54, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows Server 2012 (64-bit)', 167, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows Server 2012 R2 (64-bit)', 168, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 58, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Lucid Lynx 10.04 (32-bit)', 121, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Lucid Lynx 10.04 (64-bit)', 126, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Maverick Meerkat 10.10 (32-bit) (experimental)', 156, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Maverick Meerkat 10.10 (64-bit) (experimental)', 157, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Precise Pangolin 12.04 (32-bit)', 163, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Precise Pangolin 12.04 (64-bit)', 164, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Trusty Tahr 14.04', 241, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Trusty Tahr 14.04', 254, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 169, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 170, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 98, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 99, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 60, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 103, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 200, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 201, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 59, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 100, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 202, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Other install media', 203, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Trusty Tahr 14.04', 255, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Ubuntu Xenial Xerus 16.04', 256, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows 10 (32-bit)', 257, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows 10 (64-bit)', 258, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows Server 2016 (64-bit)', 259, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 7', 260, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (32-bit)', 261, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (64-bit)', 262, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (32-bit)', 263, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 6 (64-bit)', 264, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (32-bit)', 265, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (64-bit)', 266, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (32-bit)', 267, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (64-bit)', 268, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CoreOS', 271, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 7', 272, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 7', 273, now(), 0); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'CentOS 7', 274, now(), 0); + CREATE TABLE IF NOT EXISTS `cloud`.`guest_os_details` ( `id` bigint unsigned NOT NULL auto_increment, `guest_os_id` bigint unsigned NOT NULL COMMENT 'VPC gateway id', diff --git a/engine/schema/src/test/java/com/cloud/upgrade/DatabaseUpgradeCheckerTest.java b/engine/schema/src/test/java/com/cloud/upgrade/DatabaseUpgradeCheckerTest.java index 982a386161b6..f8ba468c0874 100644 --- a/engine/schema/src/test/java/com/cloud/upgrade/DatabaseUpgradeCheckerTest.java +++ b/engine/schema/src/test/java/com/cloud/upgrade/DatabaseUpgradeCheckerTest.java @@ -27,11 +27,15 @@ import com.cloud.upgrade.DatabaseUpgradeChecker.NoopDbUpgrade; import com.cloud.upgrade.dao.DbUpgrade; -import com.cloud.upgrade.dao.Upgrade41000to41100; +import com.cloud.upgrade.dao.Upgrade41000to4100226; +import com.cloud.upgrade.dao.Upgrade4100226to4100227; +import com.cloud.upgrade.dao.Upgrade4100227to4100228; +import com.cloud.upgrade.dao.Upgrade4100228to4100229; +import com.cloud.upgrade.dao.Upgrade4100229to4100230; import com.cloud.upgrade.dao.Upgrade41100to41110; import com.cloud.upgrade.dao.Upgrade41110to41120; import com.cloud.upgrade.dao.Upgrade41120to41130; -import com.cloud.upgrade.dao.Upgrade41120to41200; +import com.cloud.upgrade.dao.Upgrade41130to41200; import com.cloud.upgrade.dao.Upgrade452to453; import com.cloud.upgrade.dao.Upgrade453to460; import com.cloud.upgrade.dao.Upgrade460to461; @@ -96,14 +100,18 @@ public void testCalculateUpgradePath410to412() { assertNotNull(upgrades); assertTrue(upgrades.length >= 1); - assertTrue(upgrades[0] instanceof Upgrade41000to41100); - assertTrue(upgrades[1] instanceof Upgrade41100to41110); - assertTrue(upgrades[2] instanceof Upgrade41110to41120); - assertTrue(upgrades[3] instanceof Upgrade41120to41130); - assertTrue(upgrades[4] instanceof Upgrade41120to41200); - - assertTrue(Arrays.equals(new String[] {"4.11.0.0", "4.11.1.0"}, upgrades[1].getUpgradableVersionRange())); - assertEquals(currentVersion.toString(), upgrades[4].getUpgradedVersion()); + assertTrue(upgrades[0] instanceof Upgrade41000to4100226); + assertTrue(upgrades[1] instanceof Upgrade4100226to4100227); + assertTrue(upgrades[2] instanceof Upgrade4100227to4100228); + assertTrue(upgrades[3] instanceof Upgrade4100228to4100229); + assertTrue(upgrades[4] instanceof Upgrade4100229to4100230); + assertTrue(upgrades[16] instanceof Upgrade41100to41110); + assertTrue(upgrades[17] instanceof Upgrade41110to41120); + assertTrue(upgrades[18] instanceof Upgrade41120to41130); + assertTrue(upgrades[19] instanceof Upgrade41130to41200); + + assertTrue(Arrays.equals(new String[] {"4.11.0.0", "4.11.1.0"}, upgrades[16].getUpgradableVersionRange())); + assertEquals(currentVersion.toString(), upgrades[19].getUpgradedVersion()); } diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java index 2639968f261a..9b01e5351864 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java @@ -18,14 +18,14 @@ */ package org.apache.cloudstack.storage.motion; +import static com.cloud.storage.snapshot.SnapshotManager.BackupSnapshotAfterTakingSnapshot; + import java.util.HashMap; import java.util.List; import java.util.Map; import javax.inject.Inject; -import com.cloud.agent.api.to.DiskTO; -import com.cloud.storage.Storage; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy; @@ -60,18 +60,19 @@ import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; +import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.NfsTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.configuration.Config; import com.cloud.host.Host; import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.DataStoreRole; -import com.cloud.storage.StorageManager; +import com.cloud.storage.Storage; import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VolumeDao; -import static com.cloud.storage.snapshot.SnapshotManager.BackupSnapshotAfterTakingSnapshot; import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.DB; import com.cloud.utils.exception.CloudRuntimeException; diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index b48ae6d22dc0..9ed95b2388f5 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -2453,6 +2453,7 @@ private Map getVolumeDetails(VolumeInfo volumeInfo) { volumeDetails.put(DiskTO.PROTOCOL_TYPE, (volumeVO.getPoolType() != null) ? volumeVO.getPoolType().toString() : null); volumeDetails.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(storagePoolVO.getId()))); + volumeDetails.put(DiskTO.PATH, volumeVO.getPath()); volumeDetails.put(DiskTO.VOLUME_SIZE, String.valueOf(volumeVO.getSize())); volumeDetails.put(DiskTO.SCSI_NAA_DEVICE_ID, getVolumeProperty(volumeInfo.getId(), DiskTO.SCSI_NAA_DEVICE_ID)); diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java index 4aefccc67abd..231fe272461e 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java @@ -183,6 +183,10 @@ public void createTemplateAsync(TemplateInfo template, DataStore store, AsyncCom // update template_store_ref and template state try { templateOnStore.processEvent(ObjectInDataStoreStateMachine.Event.CreateOnlyRequested); + if(template.getFormat().equals(ImageFormat.PXEBOOT)) { + templateOnStore.processEvent(ObjectInDataStoreStateMachine.Event.OperationSuccessed); + templateOnStore.setSize(0L); + } } catch (Exception e) { TemplateApiResult result = new TemplateApiResult(templateOnStore); result.setResult(e.toString()); @@ -798,7 +802,7 @@ private boolean createChildDataDiskTemplate(DatadiskTO dataDiskTemplate, VMTempl String templateName = dataDiskTemplate.isIso() ? dataDiskTemplate.getPath().substring(dataDiskTemplate.getPath().lastIndexOf(File.separator) + 1) : template.getName() + suffix + diskCount; VMTemplateVO templateVO = new VMTemplateVO(templateId, templateName, format, false, false, false, ttype, template.getUrl(), template.requiresHvm(), template.getBits(), template.getAccountId(), null, templateName, false, guestOsId, false, template.getHypervisorType(), null, - null, false, false, false, false); + null, false, false, false, false, template.getBootFilename()); if (dataDiskTemplate.isIso()){ templateVO.setUniqueName(templateName); } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java index 0151a7cdedb0..f32f5e517fc0 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java @@ -436,6 +436,11 @@ public boolean isRequiresHvm() { return imageVO.isRequiresHvm(); } + @Override + public String getBootFilename() { + return imageVO.getBootFilename(); + } + @Override public String getDisplayText() { return imageVO.getDisplayText(); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java index 3ef9fbc4225e..eb5bef52df47 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java @@ -175,6 +175,14 @@ protected Long getMaxTemplateSizeInBytes() { } } + protected Long getMaxVolumeSizeInBytes() { + try { + return Long.parseLong(configDao.getValue("storage.max.volume.upload.size")) * 1024L * 1024L * 1024L; + } catch (NumberFormatException e) { + return null; + } + } + @Override public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback callback) { CreateContext context = new CreateContext(callback, data); diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java index 53fa21f3a794..cc6965d6cc3a 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java @@ -23,8 +23,6 @@ import javax.inject.Inject; -import com.cloud.storage.VMTemplateVO; -import com.cloud.storage.dao.VMTemplateDao; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -34,7 +32,10 @@ import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; import com.cloud.storage.DataStoreRole; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; public class VolumeDataFactoryImpl implements VolumeDataFactory { @@ -83,7 +84,7 @@ public VolumeInfo getVolume(long volumeId) { return null; } VolumeObject vol = null; - if (volumeVO.getPoolId() == null) { + if (volumeVO.getPoolId() == null || volumeVO.getState() == Volume.State.Uploaded) { DataStore store = null; VolumeDataStoreVO volumeStore = volumeStoreDao.findByVolume(volumeId); if (volumeStore != null) { diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index f74ef7a38771..a2df3ec570bf 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -1776,7 +1776,7 @@ public AsyncCallFuture copyVolume(VolumeInfo srcVolume, DataSto return copyVolumeFromImageToPrimary(srcVolume, destStore); } - if (destStore.getRole() == DataStoreRole.Image) { + if (destStore.getRole() == DataStoreRole.Image || destStore.getRole() == DataStoreRole.ImageCache) { return copyVolumeFromPrimaryToImage(srcVolume, destStore); } diff --git a/packaging/centos7/cloud.spec b/packaging/centos7/cloud.spec index 431dbee93024..42aa21f3eb01 100644 --- a/packaging/centos7/cloud.spec +++ b/packaging/centos7/cloud.spec @@ -212,6 +212,15 @@ if [ \"%{_temp}\" != "" ]; then FLAGS="$FLAGS `rpm --eval %{?_temp}`" fi +if [ "%{_tests}" == "SKIP" ] ; then + echo "Adding skipTests flag to the maven build" + FLAGS="$FLAGS -DskipTests" +fi + +# Installing missing deps +curl -L https://github.com/Juniper/netconf-java/releases/download/1.0.0/Netconf.jar --output netconf-java.jar +mvn install:install-file -Dfile=netconf-java.jar -DgroupId=net.juniper.netconf -DartifactId=netconf-juniper -Dversion=1.0 -Dpackaging=jar + mvn -Psystemvm,developer $FLAGS clean package cd ui && npm install && npm run build && cd .. diff --git a/packaging/package.sh b/packaging/package.sh index bf95f84a11a7..0be1a8802d20 100755 --- a/packaging/package.sh +++ b/packaging/package.sh @@ -35,6 +35,7 @@ Optional arguments: -r, --release integer Set the package release version (default is 1 for normal and prereleases, empty for SNAPSHOT) -s, --simulator string Build package for Simulator ("default"|"DEFAULT"|"simulator"|"SIMULATOR") (default "default") -b, --brand string Set branding to be used in package name (it will override any branding string in POM version) + -S, --skip-tests Set the flag to skip unit tests (if not provided tests will be executed) -T, --use-timestamp Use epoch timestamp instead of SNAPSHOT in the package name (if not provided, use "SNAPSHOT") -t --templates Passes necessary flag to package the required templates. Comma separated string - kvm,xen,vmware,ovm,hyperv @@ -78,6 +79,9 @@ function packaging() { else INDICATOR="SNAPSHOT" fi + if [ "$SKIP_TESTS" == "true" ]; then + DEFTESTS="-D_tests SKIP" + fi DISTRO=$3 @@ -173,7 +177,7 @@ function packaging() { echo ". executing rpmbuild" cp "$PWD/$DISTRO/cloud.spec" "$RPMDIR/SPECS" - (cd "$RPMDIR"; rpmbuild --define "_topdir ${RPMDIR}" "${DEFVER}" "${DEFFULLVER}" "${DEFREL}" ${DEFPRE+"$DEFPRE"} ${DEFOSSNOSS+"$DEFOSSNOSS"} ${DEFSIM+"$DEFSIM"} ${DEFTEMP+"$DEFTEMP"} -bb SPECS/cloud.spec) + (cd "$RPMDIR"; rpmbuild --define "_topdir ${RPMDIR}" "${DEFVER}" "${DEFFULLVER}" "${DEFREL}" ${DEFPRE+"$DEFPRE"} ${DEFOSSNOSS+"$DEFOSSNOSS"} ${DEFSIM+"$DEFSIM"} ${DEFTEMP+"$DEFTEMP"} ${DEFTESTS+"$DEFTESTS"} -bb SPECS/cloud.spec) if [ $? -ne 0 ]; then if [ "$USE_TIMESTAMP" == "true" ]; then (cd $PWD/../; git reset --hard) @@ -194,6 +198,7 @@ SIM="" PACKAGEVAL="" RELEASE="" BRANDING="" +SKIP_TESTS="false" USE_TIMESTAMP="false" unrecognized_flags="" @@ -253,6 +258,11 @@ while [ -n "$1" ]; do shift 2 ;; + -S | --skip-tests) + SKIP_TESTS="true" + shift 1 + ;; + -T | --use-timestamp) USE_TIMESTAMP="true" shift 1 diff --git a/packaging/suse15/cloud.spec b/packaging/suse15/cloud.spec index 9f2dc3782197..5661e9afe906 100644 --- a/packaging/suse15/cloud.spec +++ b/packaging/suse15/cloud.spec @@ -210,6 +210,13 @@ fi mvn -Psystemvm,developer $FLAGS clean package cd ui && npm install && npm run build && cd .. +if [ "%{_tests}" == "SKIP" ] ; then + echo "Adding skipTests flag to the maven build" + FLAGS="$FLAGS -DskipTests" +fi + +mvn -Psystemvm,developer $FLAGS clean package + %install [ ${RPM_BUILD_ROOT} != "/" ] && rm -rf ${RPM_BUILD_ROOT} # Common directories diff --git a/plugins/ca/root-ca/src/test/java/org/apache/cloudstack/ca/provider/RootCAProviderTest.java b/plugins/ca/root-ca/src/test/java/org/apache/cloudstack/ca/provider/RootCAProviderTest.java index 15514b91c785..faaca5a8b147 100644 --- a/plugins/ca/root-ca/src/test/java/org/apache/cloudstack/ca/provider/RootCAProviderTest.java +++ b/plugins/ca/root-ca/src/test/java/org/apache/cloudstack/ca/provider/RootCAProviderTest.java @@ -29,22 +29,15 @@ import java.security.cert.X509Certificate; import java.util.Arrays; -import javax.net.ssl.SSLEngine; - import org.apache.cloudstack.framework.ca.Certificate; -import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.utils.security.CertUtils; -import org.apache.cloudstack.utils.security.SSLUtils; import org.joda.time.DateTime; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; - import org.mockito.junit.MockitoJUnitRunner; -import org.mockito.Mockito; - @RunWith(MockitoJUnitRunner.class) public class RootCAProviderTest { @@ -128,22 +121,22 @@ public void testRevokeCertificate() throws Exception { Assert.assertTrue(provider.revokeCertificate(CertUtils.generateRandomBigInt(), "anyString")); } - @Test - public void testCreateSSLEngineWithoutAuthStrictness() throws Exception { - provider.rootCAAuthStrictness = Mockito.mock(ConfigKey.class); - Mockito.when(provider.rootCAAuthStrictness.value()).thenReturn(Boolean.FALSE); - final SSLEngine e = provider.createSSLEngine(SSLUtils.getSSLContext(), "/1.2.3.4:5678", null); - Assert.assertTrue(e.getWantClientAuth()); - Assert.assertFalse(e.getNeedClientAuth()); - } - - @Test - public void testCreateSSLEngineWithAuthStrictness() throws Exception { - provider.rootCAAuthStrictness = Mockito.mock(ConfigKey.class); - Mockito.when(provider.rootCAAuthStrictness.value()).thenReturn(Boolean.TRUE); - final SSLEngine e = provider.createSSLEngine(SSLUtils.getSSLContext(), "/1.2.3.4:5678", null); - Assert.assertTrue(e.getNeedClientAuth()); - } +// @Test +// public void testCreateSSLEngineWithoutAuthStrictness() throws Exception { +// provider.rootCAAuthStrictness = Mockito.mock(ConfigKey.class); +// Mockito.when(provider.rootCAAuthStrictness.value()).thenReturn(Boolean.FALSE); +// final SSLEngine e = provider.createSSLEngine(SSLUtils.getSSLContext(), "/1.2.3.4:5678", null); +// Assert.assertTrue(e.getWantClientAuth()); +// Assert.assertFalse(e.getNeedClientAuth()); +// } +// +// @Test +// public void testCreateSSLEngineWithAuthStrictness() throws Exception { +// provider.rootCAAuthStrictness = Mockito.mock(ConfigKey.class); +// Mockito.when(provider.rootCAAuthStrictness.value()).thenReturn(Boolean.TRUE); +// final SSLEngine e = provider.createSSLEngine(SSLUtils.getSSLContext(), "/1.2.3.4:5678", null); +// Assert.assertTrue(e.getNeedClientAuth()); +// } @Test public void testGetProviderName() throws Exception { diff --git a/plugins/hypervisors/baremetal/pom.xml b/plugins/hypervisors/baremetal/pom.xml index a6d3fbf374cf..f26909892f91 100755 --- a/plugins/hypervisors/baremetal/pom.xml +++ b/plugins/hypervisors/baremetal/pom.xml @@ -47,5 +47,31 @@ jaxb-impl ${cs.jaxb.version} - + + org.apache.httpcomponents + httpcore + + + org.apache.httpcomponents + httpclient + + + com.google.code.gson + gson + 2.8.0 + + + com.github.scribejava + scribejava-apis + 3.4.1 + + + net.juniper.netconf + netconf-juniper + 1.0 + + + + + diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/database/BaremetalRctVO.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/database/BaremetalRctVO.java index 3a24cf4554d8..b3610d208b9d 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/database/BaremetalRctVO.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/database/BaremetalRctVO.java @@ -45,7 +45,7 @@ public class BaremetalRctVO implements InternalIdentity, Identity { @Column(name = "url") private String url; - @Column(name = "rct") + @Column(name = "rct", length = 65535) private String rct; public long getId() { diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java index 3bdd2e81fb51..dcfacfc8a450 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java @@ -27,31 +27,40 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.UUID; import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.api.ApiConstants; +import org.apache.commons.lang3.StringUtils; import org.apache.log4j.Logger; +import com.cloud.agent.AgentManager; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupRoutingCommand; -import com.cloud.baremetal.networkservice.BareMetalResourceBase; +import com.cloud.api.query.dao.UserVmJoinDao; +import com.cloud.baremetal.database.BaremetalRctDao; import com.cloud.configuration.Config; +import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenterVO; import com.cloud.exception.DiscoveryException; import com.cloud.host.Host; import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.host.dao.HostDetailsDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.network.Network; +import com.cloud.network.dao.NetworkDao; import com.cloud.resource.Discoverer; import com.cloud.resource.DiscovererBase; import com.cloud.resource.ResourceStateAdapter; import com.cloud.resource.ServerResource; import com.cloud.resource.UnableDeleteHostException; +import com.cloud.storage.dao.VMTemplateDao; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; import com.cloud.utils.script.Script2; @@ -65,6 +74,16 @@ public class BareMetalDiscoverer extends DiscovererBase implements Discoverer, R @Inject protected VMInstanceDao _vmDao = null; + @Inject BaremetalVlanManager vlanMgr; + @Inject NetworkDao networkDao; + @Inject HostDao hostDao; + @Inject VMTemplateDao templateDao; + @Inject HostDetailsDao hostDetailsDao; + @Inject ClusterDetailsDao clusterDetailsDao; + @Inject BaremetalRctDao rctDao; + @Inject AgentManager agentManager; + @Inject UserVmJoinDao userVmJoinDao; + @Override public boolean configure(String name, Map params) throws ConfigurationException { _resourceMgr.registerResourceStateAdapter(this.getClass().getSimpleName(), this); @@ -81,13 +100,12 @@ public boolean stop() { public Map> find(long dcId, Long podId, Long clusterId, URI url, String username, String password, List hostTags) throws DiscoveryException { - /* Enable this after we decide to use addBaremetalHostCmd instead of addHostCmd String discoverName = _params.get(ApiConstants.BAREMETAL_DISCOVER_NAME); if (!this.getClass().getName().equals(discoverName)) { return null; - } */ + } - Map> resources = new HashMap>(); + Map> resources = new HashMap>(); Map details = new HashMap(); if (!url.getScheme().equals("http")) { @@ -95,6 +113,7 @@ public Map> find(long dcId, Long p s_logger.debug(msg); return null; } + if (clusterId == null) { String msg = "must specify cluster Id when add host"; s_logger.debug(msg); @@ -125,22 +144,24 @@ public Map> find(long dcId, Long p String ipmiIp = ia.getHostAddress(); String guid = UUID.nameUUIDFromBytes(ipmiIp.getBytes()).toString(); - String injectScript = "scripts/util/ipmi.py"; - String scriptPath = Script.findScript("", injectScript); - if (scriptPath == null) { - throw new CloudRuntimeException("Unable to find key ipmi script " - + injectScript); - } + // only check the URL when the host is not a Baremetal of type MaaS + if (StringUtils.isEmpty(_params.get(ApiConstants.BAREMETAL_MAAS))) { + String injectScript = "scripts/util/ipmi.py"; + String scriptPath = Script.findScript("", injectScript); + if (scriptPath == null) { + throw new CloudRuntimeException("Unable to find key ipmi script " + injectScript); + } - final Script2 command = new Script2(scriptPath, s_logger); - command.add("ping"); - command.add("hostname="+ipmiIp); - command.add("usrname="+username); - command.add("password="+password, ParamType.PASSWORD); - final String result = command.execute(); - if (result != null) { - s_logger.warn(String.format("Can not set up ipmi connection(ip=%1$s, username=%2$s, password=%3$s, args) because %4$s", ipmiIp, username, "******", result)); - return null; + final Script2 command = new Script2(scriptPath, s_logger); + command.add("ping"); + command.add("hostname=" + ipmiIp); + command.add("usrname=" + username); + command.add("password=" + password, ParamType.PASSWORD); + final String result = command.execute(); + if (result != null) { + s_logger.warn(String.format("Can not set up ipmi connection(ip=%1$s, username=%2$s, password=%3$s, args) because %4$s", ipmiIp, username, "******", result)); + return null; + } } ClusterVO clu = _clusterDao.findById(clusterId); @@ -158,28 +179,28 @@ public Map> find(long dcId, Long p params.put(ApiConstants.PRIVATE_IP, ipmiIp); params.put(ApiConstants.USERNAME, username); params.put(ApiConstants.PASSWORD, password); - params.put("vmDao", _vmDao); - params.put("configDao", _configDao); String resourceClassName = _configDao.getValue(Config.ExternalBaremetalResourceClassName.key()); - BareMetalResourceBase resource = null; + BareMetalResource resource = null; if (resourceClassName != null) { Class clazz = Class.forName(resourceClassName); - resource = (BareMetalResourceBase) clazz.newInstance(); + resource = (BareMetalResource) clazz.newInstance(); String externalUrl = _configDao.getValue(Config.ExternalBaremetalSystemUrl.key()); - if (externalUrl == null) { + if (externalUrl == null && resourceClassName != "org.apache.cloudstack.compute.maas.MaasResourceProvider") { throw new IllegalArgumentException(String.format("You must specify ExternalBaremetalSystemUrl in global config page as ExternalBaremetalResourceClassName is not null")); } details.put(BaremetalManager.ExternalBaremetalSystemUrl, externalUrl); } else { - resource = new BareMetalResourceBase(); + resource = new BareMetalResourceProvider(); } + resource.configure("Bare Metal Agent", params); + resource.start(); - String memCapacity = (String)params.get(ApiConstants.MEMORY); - String cpuCapacity = (String)params.get(ApiConstants.CPU_SPEED); - String cpuNum = (String)params.get(ApiConstants.CPU_NUMBER); - String mac = (String)params.get(ApiConstants.HOST_MAC); + String memCapacity = Optional.ofNullable((String)params.get(ApiConstants.MEMORY)).orElse(String.valueOf(resource.getMemCapacity())); + String cpuCapacity = Optional.ofNullable((String)params.get(ApiConstants.CPU_SPEED)).orElse(String.valueOf(resource.getCpuCapacity())); + String cpuNum = Optional.ofNullable((String)params.get(ApiConstants.CPU_NUMBER)).orElse(String.valueOf(resource.getCpuNum())); + String mac = Optional.ofNullable((String)params.get(ApiConstants.HOST_MAC)).orElse(resource.getMac()); if (hostTags != null && hostTags.size() != 0) { details.put("hostTag", hostTags.get(0)); } @@ -198,7 +219,6 @@ public Map> find(long dcId, Long p details.put(BaremetalManager.EchoSecurityGroupAgent, isEchoScAgent); resources.put(resource, details); - resource.start(); zone.setGatewayProvider(Network.Provider.ExternalGateWay.getName()); zone.setDnsProvider(Network.Provider.ExternalDhcpServer.getName()); @@ -273,8 +293,6 @@ protected HashMap buildConfigParams(HostVO host) { HashMap params = super.buildConfigParams(host); params.put("hostId", host.getId()); params.put("ipaddress", host.getPrivateIpAddress()); - params.put("vmDao", _vmDao); - params.put("configDao", _configDao); return params; } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java index c37b51df5e9b..929c8efc7c8e 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java @@ -22,8 +22,8 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.utils.NumbersUtil; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.commons.lang3.StringUtils; import org.apache.log4j.Logger; import com.cloud.capacity.CapacityManager; @@ -46,9 +46,12 @@ import com.cloud.offering.ServiceOffering; import com.cloud.org.Cluster; import com.cloud.resource.ResourceManager; +import com.cloud.utils.NumbersUtil; import com.cloud.utils.component.AdapterBase; +import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.dao.VMInstanceDao; public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner { private static final Logger s_logger = Logger.getLogger(BareMetalPlanner.class); @@ -68,6 +71,8 @@ public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner { protected ResourceManager _resourceMgr; @Inject protected ClusterDetailsDao _clusterDetailsDao; + @Inject + protected VMInstanceDao _vmDao; @Override public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException { @@ -104,8 +109,7 @@ public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan pl hosts = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, cluster.getId(), cluster.getPodId(), cluster.getDataCenterId()); if (hostTag != null) { for (HostVO h : hosts) { - _hostDao.loadDetails(h); - if (h.getDetail("hostTag") != null && h.getDetail("hostTag").equalsIgnoreCase(hostTag)) { + if (hasHostCorrectTag(h, hostTag)) { target = h; break; } @@ -137,7 +141,9 @@ public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan pl Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); - if (_capacityMgr.checkIfHostHasCapacity(h.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { + if (hasHostCorrectTag(h, hostTag) && _capacityMgr.checkIfHostHasCapacity(h.getId(), + cpu_requested, ram_requested, false, + cpuOvercommitRatio, memoryOvercommitRatio, true) && isHostAvailable(h)) { s_logger.debug("Find host " + h.getId() + " has enough capacity"); DataCenter dc = _dcDao.findById(h.getDataCenterId()); Pod pod = _podDao.findById(h.getPodId()); @@ -150,6 +156,26 @@ public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan pl return null; } + private boolean isHostAvailable(HostVO h) { + List vmsRunningOnHost = _vmDao.listByHostId(h.getId()); + List vmsStoppedOnHost = _vmDao.listByLastHostId(h.getId()); + return vmsRunningOnHost.isEmpty() && vmsStoppedOnHost.isEmpty(); + } + + private boolean hasHostCorrectTag(HostVO h, String tag) { + _hostDao.loadDetails(h); + if (StringUtils.isEmpty(tag)) { + return true; + } + if (StringUtils.isEmpty(h.getDetail("hostTag"))) { + return false; + } + if (h.getDetail("hostTag").equalsIgnoreCase(tag)) { + return true; + } + return false; + } + @Override public boolean canHandle(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) { return vm.getHypervisorType() == HypervisorType.BareMetal; diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalResource.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalResource.java new file mode 100644 index 000000000000..b58a8bb8216e --- /dev/null +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalResource.java @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Automatically generated by addcopyright.py at 01/29/2013 +// Apache License, Version 2.0 (the "License"); you may not use this +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +package com.cloud.baremetal.manager; + +import com.cloud.resource.ServerResource; + +public interface BareMetalResource extends ServerResource { + long getMemCapacity(); + long getCpuCapacity(); + long getCpuNum(); + String getMac(); + +} diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalResourceProvider.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalResourceProvider.java new file mode 100644 index 000000000000..dc5275e60dc1 --- /dev/null +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalResourceProvider.java @@ -0,0 +1,107 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Automatically generated by addcopyright.py at 01/29/2013 +// Apache License, Version 2.0 (the "License"); you may not use this +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +package com.cloud.baremetal.manager; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.log4j.Logger; +import org.springframework.beans.factory.annotation.Configurable; + +import com.cloud.agent.api.HostVmStateReportEntry; +import com.cloud.agent.api.StartupCommand; +import com.cloud.agent.api.StartupRoutingCommand; +import com.cloud.baremetal.networkservice.BareMetalResourceBase; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.dao.VMInstanceDao; + +@Configurable +public class BareMetalResourceProvider extends BareMetalResourceBase implements BareMetalResource { + private static final Logger s_logger = Logger.getLogger(BareMetalResourceProvider.class); + + @Inject protected ConfigurationDao configDao; + @Inject protected VMInstanceDao vmDao; + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + ipmiIface = "default"; + configure(name, params, configDao, vmDao); + + if (params.keySet().size() == 0) { + return true; + } + + return true; + } + + @Override + public boolean start() { + return true; + } + + @Override + public boolean stop() { + return true; + } + + @Override + public StartupCommand[] initialize() { + StartupRoutingCommand cmd = new StartupRoutingCommand(0, 0, 0, 0, null, Hypervisor.HypervisorType.BareMetal, + new HashMap()); + + cmd.setDataCenter(_zone); + cmd.setPod(_pod); + cmd.setCluster(_cluster); + cmd.setGuid(_uuid); + cmd.setName(_ip); + cmd.setPrivateIpAddress(_ip); + cmd.setStorageIpAddress(_ip); + cmd.setVersion(BareMetalResourceBase.class.getPackage().getImplementationVersion()); + cmd.setCpus((int) _cpuNum); + cmd.setSpeed(_cpuCapacity); + cmd.setMemory(_memCapacity); + cmd.setPrivateMacAddress(_mac); + cmd.setPublicMacAddress(_mac); + return new StartupCommand[] { cmd }; + } + + protected Map getHostVmStateReport() { + Map states = new HashMap(); + if (hostId != null) { + final List vms = vmDao.listByHostId(hostId); + for (VMInstanceVO vm : vms) { + states.put( + vm.getInstanceName(), + new HostVmStateReportEntry( + vm.getPowerState(), "host-" + hostId + ) + ); + } + } + return states; + } +} diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java index 8265f951f8a8..764a724fda2f 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java @@ -184,6 +184,14 @@ public boolean delete(TemplateProfile profile) { } } + if (profile.getZoneId() > 0) { + VMTemplateZoneVO templateZone = _tmpltZoneDao.findByZoneTemplate(profile.getZoneId(), templateId); + + if (templateZone != null) { + _tmpltZoneDao.remove(templateZone.getId()); + } + } + s_logger.debug("Successfully marked template host refs for template: " + template.getName() + " as destroyed in zone: " + zoneName); // If there are no more non-destroyed template host entries for this template, delete it diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java index b1aafc692ef1..b3ce3e8a2156 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java @@ -32,6 +32,8 @@ import com.cloud.vm.VMInstanceVO; import com.cloud.vm.dao.VMInstanceDao; import org.apache.cloudstack.api.BaremetalProvisionDoneNotificationCmd; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; import org.apache.log4j.Logger; import org.apache.cloudstack.api.AddBaremetalHostCmd; @@ -45,7 +47,7 @@ import com.cloud.vm.VirtualMachine.Event; import com.cloud.vm.VirtualMachine.State; -public class BaremetalManagerImpl extends ManagerBase implements BaremetalManager, StateListener { +public class BaremetalManagerImpl extends ManagerBase implements BaremetalManager, StateListener, Configurable { private static final Logger s_logger = Logger.getLogger(BaremetalManagerImpl.class); @Inject @@ -53,6 +55,12 @@ public class BaremetalManagerImpl extends ManagerBase implements BaremetalManage @Inject protected VMInstanceDao vmDao; + public static final ConfigKey diskEraseOnDestroy = new ConfigKey(Integer.class, "baremetal.disk.erase.destroy", "Advanced", String.valueOf(0), + "Erase disk on destroy baremetal VM (0=No erase, 1=Quick erase, 2=Full erase)", false, ConfigKey.Scope.Global, null); + + public static final ConfigKey pxeVlan = new ConfigKey(Integer.class, "baremetal.pxe.vlan", "Advanced", null, + "VLAN of the PXE network", false, ConfigKey.Scope.Global, null); + @Override public boolean configure(String name, Map params) throws ConfigurationException { VirtualMachine.State.getStateMachine().registerListener(this); @@ -93,7 +101,7 @@ public boolean postStateTransitionEvent(StateMachine2.Transition t HostVO host = _hostDao.findById(vo.getHostId()); if (host == null) { - s_logger.debug("Skip oldState " + oldState + " to " + "newState " + newState + " transimtion"); + s_logger.debug("Skip state transition from " + oldState + " to " + newState); return true; } _hostDao.loadDetails(host); @@ -153,4 +161,14 @@ public void notifyProvisionDone(BaremetalProvisionDoneNotificationCmd cmd) { s_logger.debug(String.format("received baremetal provision done notification for vm[id:%s name:%s] running on host[mac:%s, ip:%s]", vm.getId(), vm.getInstanceName(), host.getPrivateMacAddress(), host.getPrivateIpAddress())); } + + @Override + public String getConfigComponentName() { + return BaremetalManager.class.getSimpleName(); + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[] {diskEraseOnDestroy, pxeVlan}; + } } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalVlanManager.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalVlanManager.java index b6311f79aab5..a1168b6c0623 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalVlanManager.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalVlanManager.java @@ -19,11 +19,8 @@ import com.cloud.baremetal.networkservice.BaremetalRctResponse; import com.cloud.baremetal.networkservice.BaremetalSwitchBackend; -import com.cloud.deploy.DeployDestination; -import com.cloud.network.Network; import com.cloud.utils.component.Manager; import com.cloud.utils.component.PluggableService; -import com.cloud.vm.VirtualMachineProfile; import org.apache.cloudstack.api.AddBaremetalRctCmd; import org.apache.cloudstack.api.DeleteBaremetalRctCmd; @@ -31,13 +28,15 @@ public interface BaremetalVlanManager extends Manager, PluggableService { BaremetalRctResponse addRct(AddBaremetalRctCmd cmd); - void prepareVlan(Network nw, DeployDestination destHost); - - void releaseVlan(Network nw, VirtualMachineProfile vm); - void registerSwitchBackend(BaremetalSwitchBackend backend); void deleteRct(DeleteBaremetalRctCmd cmd); + void prepareVlan(int vlanId, String macAddress, VlanType type); + + void releaseVlan(int vlanId, String macAddress, VlanType type); + + void releaseAllVlan(String macAddress, VlanType type); + BaremetalRctResponse listRct(); } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalVlanManagerImpl.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalVlanManagerImpl.java index 274962562bec..a2b74265e066 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalVlanManagerImpl.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalVlanManagerImpl.java @@ -17,16 +17,28 @@ // package com.cloud.baremetal.manager; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.AddBaremetalRctCmd; +import org.apache.cloudstack.api.DeleteBaremetalRctCmd; +import org.apache.cloudstack.api.ListBaremetalRctCmd; +import org.apache.cloudstack.utils.baremetal.BaremetalUtils; +import org.springframework.web.client.RestTemplate; + import com.cloud.baremetal.database.BaremetalRctDao; import com.cloud.baremetal.database.BaremetalRctVO; import com.cloud.baremetal.networkservice.BaremetalRctResponse; import com.cloud.baremetal.networkservice.BaremetalSwitchBackend; import com.cloud.baremetal.networkservice.BaremetalVlanStruct; -import com.cloud.deploy.DeployDestination; -import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; -import com.cloud.network.Network; -import com.cloud.network.Networks; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.AccountVO; @@ -38,22 +50,7 @@ import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.vm.VirtualMachineProfile; import com.google.gson.Gson; -import org.apache.cloudstack.acl.RoleType; -import org.apache.cloudstack.api.AddBaremetalRctCmd; -import org.apache.cloudstack.api.DeleteBaremetalRctCmd; -import org.apache.cloudstack.api.ListBaremetalRctCmd; -import org.apache.cloudstack.utils.baremetal.BaremetalUtils; -import org.springframework.web.client.RestTemplate; - -import javax.inject.Inject; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.UUID; /** * Created by frank on 5/8/14. @@ -120,7 +117,8 @@ public BaremetalRctResponse addRct(AddBaremetalRctCmd cmd) { } @Override - public void prepareVlan(Network nw, DeployDestination destHost) { + public void prepareVlan(int vlan, String macAddress, VlanType type) { + List vos = rctDao.listAll(); if (vos.isEmpty()) { throw new CloudRuntimeException("no rack configuration found, please call addBaremetalRct to add one"); @@ -129,12 +127,11 @@ public void prepareVlan(Network nw, DeployDestination destHost) { BaremetalRctVO vo = vos.get(0); BaremetalRct rct = gson.fromJson(vo.getRct(), BaremetalRct.class); - RackPair rp = findRack(rct, destHost.getHost().getPrivateMacAddress()); + RackPair rp = findRack(rct, macAddress); if (rp == null) { - throw new CloudRuntimeException(String.format("cannot find any rack contains host[mac:%s], please double check your rack configuration file, update it and call addBaremetalRct again", destHost.getHost().getPrivateMacAddress())); + throw new CloudRuntimeException(String.format("cannot find any rack contains host[mac:%s], please double check your rack configuration file, update it and call addBaremetalRct again", macAddress)); } - int vlan = Integer.parseInt(Networks.BroadcastDomainType.getValue(nw.getBroadcastUri())); BaremetalSwitchBackend backend = getSwitchBackend(rp.rack.getL2Switch().getType()); BaremetalVlanStruct struct = new BaremetalVlanStruct(); struct.setHostMac(rp.host.getMac()); @@ -144,11 +141,12 @@ public void prepareVlan(Network nw, DeployDestination destHost) { struct.setSwitchType(rp.rack.getL2Switch().getType()); struct.setSwitchUsername(rp.rack.getL2Switch().getUsername()); struct.setVlan(vlan); + struct.setVlanType(type); backend.prepareVlan(struct); } @Override - public void releaseVlan(Network nw, VirtualMachineProfile vm) { + public void releaseVlan(int vlanId, String macAddress, VlanType type) { List vos = rctDao.listAll(); if (vos.isEmpty()) { throw new CloudRuntimeException("no rack configuration found, please call addBaremetalRct to add one"); @@ -156,11 +154,10 @@ public void releaseVlan(Network nw, VirtualMachineProfile vm) { BaremetalRctVO vo = vos.get(0); BaremetalRct rct = gson.fromJson(vo.getRct(), BaremetalRct.class); - HostVO host = hostDao.findById(vm.getVirtualMachine().getHostId()); - RackPair rp = findRack(rct, host.getPrivateMacAddress()); + + RackPair rp = findRack(rct, macAddress); assert rp != null : String.format("where is my rack???"); - int vlan = Integer.parseInt(Networks.BroadcastDomainType.getValue(nw.getBroadcastUri())); BaremetalVlanStruct struct = new BaremetalVlanStruct(); struct.setHostMac(rp.host.getMac()); struct.setPort(rp.host.getPort()); @@ -168,7 +165,35 @@ public void releaseVlan(Network nw, VirtualMachineProfile vm) { struct.setSwitchPassword(rp.rack.getL2Switch().getPassword()); struct.setSwitchType(rp.rack.getL2Switch().getType()); struct.setSwitchUsername(rp.rack.getL2Switch().getUsername()); - struct.setVlan(vlan); + struct.setVlan(vlanId); + struct.setVlanType(type); + struct.setRemoveAll(false); + BaremetalSwitchBackend backend = getSwitchBackend(rp.rack.getL2Switch().getType()); + backend.removePortFromVlan(struct); + } + + @Override + public void releaseAllVlan(String macAddress, VlanType type) { + List vos = rctDao.listAll(); + if (vos.isEmpty()) { + throw new CloudRuntimeException("no rack configuration found, please call addBaremetalRct to add one"); + } + + BaremetalRctVO vo = vos.get(0); + BaremetalRct rct = gson.fromJson(vo.getRct(), BaremetalRct.class); + + RackPair rp = findRack(rct, macAddress); + assert rp != null : String.format("where is my rack???"); + + BaremetalVlanStruct struct = new BaremetalVlanStruct(); + struct.setHostMac(rp.host.getMac()); + struct.setPort(rp.host.getPort()); + struct.setSwitchIp(rp.rack.getL2Switch().getIp()); + struct.setSwitchPassword(rp.rack.getL2Switch().getPassword()); + struct.setSwitchType(rp.rack.getL2Switch().getType()); + struct.setSwitchUsername(rp.rack.getL2Switch().getUsername()); + struct.setVlanType(type); + struct.setRemoveAll(true); BaremetalSwitchBackend backend = getSwitchBackend(rp.rack.getL2Switch().getType()); backend.removePortFromVlan(struct); } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/VlanType.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/VlanType.java new file mode 100644 index 000000000000..ce406771a904 --- /dev/null +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/VlanType.java @@ -0,0 +1,22 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +package com.cloud.baremetal.manager; + +public enum VlanType { + TAGGED, UNTAGGED +} diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BareMetalResourceBase.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BareMetalResourceBase.java index cc419f4158f7..a1fde144ecb1 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BareMetalResourceBase.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BareMetalResourceBase.java @@ -22,8 +22,6 @@ // Automatically generated by addcopyright.py at 04/03/2012 package com.cloud.baremetal.networkservice; -import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -40,13 +38,14 @@ import com.cloud.agent.api.CheckVirtualMachineAnswer; import com.cloud.agent.api.CheckVirtualMachineCommand; import com.cloud.agent.api.Command; -import com.cloud.agent.api.HostVmStateReportEntry; import com.cloud.agent.api.MaintainAnswer; import com.cloud.agent.api.MaintainCommand; import com.cloud.agent.api.MigrateAnswer; import com.cloud.agent.api.MigrateCommand; import com.cloud.agent.api.PingCommand; import com.cloud.agent.api.PingRoutingCommand; +import com.cloud.agent.api.PlugNicAnswer; +import com.cloud.agent.api.PlugNicCommand; import com.cloud.agent.api.PrepareForMigrationAnswer; import com.cloud.agent.api.PrepareForMigrationCommand; import com.cloud.agent.api.ReadyAnswer; @@ -56,10 +55,11 @@ import com.cloud.agent.api.SecurityGroupRulesCmd; import com.cloud.agent.api.StartAnswer; import com.cloud.agent.api.StartCommand; -import com.cloud.agent.api.StartupCommand; -import com.cloud.agent.api.StartupRoutingCommand; import com.cloud.agent.api.StopAnswer; import com.cloud.agent.api.StopCommand; +import com.cloud.agent.api.UnPlugNicAnswer; +import com.cloud.agent.api.UnPlugNicCommand; +import com.cloud.agent.api.baremetal.DestroyCommand; import com.cloud.agent.api.baremetal.IpmISetBootDevCommand; import com.cloud.agent.api.baremetal.IpmISetBootDevCommand.BootDev; import com.cloud.agent.api.baremetal.IpmiBootorResetCommand; @@ -67,7 +67,6 @@ import com.cloud.baremetal.manager.BaremetalManager; import com.cloud.configuration.Config; import com.cloud.host.Host.Type; -import com.cloud.hypervisor.Hypervisor; import com.cloud.resource.ServerResource; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.QueryBuilder; @@ -82,7 +81,7 @@ import com.cloud.vm.VirtualMachine.PowerState; import com.cloud.vm.dao.VMInstanceDao; -public class BareMetalResourceBase extends ManagerBase implements ServerResource { +public abstract class BareMetalResourceBase extends ManagerBase implements ServerResource { private static final Logger s_logger = Logger.getLogger(BareMetalResourceBase.class); protected String _uuid; protected String _zone; @@ -108,26 +107,49 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource protected Script2 _forcePowerOffCommand; protected Script2 _bootOrRebootCommand; protected String _vmName; + protected String ipmiIface; protected int ipmiRetryTimes = 5; + protected long ipmiRetryDelay = 1; + protected long ipmiTimeout = 0; protected boolean provisionDoneNotificationOn = false; protected int isProvisionDoneNotificationTimeout = 1800; - protected ConfigurationDao configDao; - protected VMInstanceDao vmDao; + public long getMemCapacity() { + return _memCapacity; + } + public long getCpuCapacity() { + return _cpuCapacity; + } - @Override - public boolean configure(String name, Map params) throws ConfigurationException { + public long getCpuNum() { + return _cpuNum; + } + + public String getMac() { + return _mac; + } + + public boolean configure(String name, Map params, ConfigurationDao configDao, VMInstanceDao vmDao) throws ConfigurationException { setName(name); + + if (params.keySet().size() == 0) { + return true; + } + _uuid = (String) params.get("guid"); - try { - _memCapacity = Long.parseLong((String) params.get(ApiConstants.MEMORY)) * 1024L * 1024L; - _cpuCapacity = Long.parseLong((String) params.get(ApiConstants.CPU_SPEED)); - _cpuNum = Long.parseLong((String) params.get(ApiConstants.CPU_NUMBER)); - } catch (NumberFormatException e) { - throw new ConfigurationException(String.format("Unable to parse number of CPU or memory capacity " - + "or cpu capacity(cpu number = %1$s memCapacity=%2$s, cpuCapacity=%3$s", params.get(ApiConstants.CPU_NUMBER), - params.get(ApiConstants.MEMORY), params.get(ApiConstants.CPU_SPEED))); + + // MaaS Create Node + if (ApiConstants.BAREMETAL_MAAS_ACTION_CREATE.equals((String) params.get(ApiConstants.BAREMETAL_MAAS_ACTION))) { + try { + _memCapacity = Long.parseLong((String) params.get(ApiConstants.MEMORY)) * 1024L * 1024L; + _cpuCapacity = Long.parseLong((String) params.get(ApiConstants.CPU_SPEED)); + _cpuNum = Long.parseLong((String) params.get(ApiConstants.CPU_NUMBER)); + } catch (NumberFormatException e) { + throw new ConfigurationException(String.format("Unable to parse number of CPU or memory capacity " + + "or cpu capacity(cpu number = %1$s memCapacity=%2$s, cpuCapacity=%3$s", params.get(ApiConstants.CPU_NUMBER), + params.get(ApiConstants.MEMORY), params.get(ApiConstants.CPU_SPEED))); + } } _zone = (String) params.get("zone"); @@ -140,8 +162,6 @@ public boolean configure(String name, Map params) throws Configu _password = (String) params.get(ApiConstants.PASSWORD); _vmName = (String) params.get("vmName"); String echoScAgent = (String) params.get(BaremetalManager.EchoSecurityGroupAgent); - vmDao = (VMInstanceDao) params.get("vmDao"); - configDao = (ConfigurationDao) params.get("configDao"); if (_pod == null) { throw new ConfigurationException("Unable to get the pod"); @@ -155,13 +175,16 @@ public boolean configure(String name, Map params) throws Configu throw new ConfigurationException("Unable to get the host address"); } - if (_mac.equalsIgnoreCase("unknown")) { - throw new ConfigurationException("Unable to get the host mac address"); - } + // MaaS Create Node + if (ApiConstants.BAREMETAL_MAAS_ACTION_CREATE.equals((String) params.get(ApiConstants.BAREMETAL_MAAS_ACTION))) { + if (_mac.equalsIgnoreCase("unknown")) { + throw new ConfigurationException("Unable to get the host mac address"); + } - if (_mac.split(":").length != 6) { - throw new ConfigurationException("Wrong MAC format(" + _mac - + "). It must be in format of for example 00:11:ba:33:aa:dd which is not case sensitive"); + if (_mac.split(":").length != 6) { + throw new ConfigurationException("Wrong MAC format(" + _mac + + "). It must be in format of for example 00:11:ba:33:aa:dd which is not case sensitive"); + } } if (_uuid == null) { @@ -172,24 +195,37 @@ public boolean configure(String name, Map params) throws Configu _isEchoScAgent = Boolean.valueOf(echoScAgent); } - String ipmiIface = "default"; - try { - ipmiIface = configDao.getValue(Config.BaremetalIpmiLanInterface.key()); - } catch (Exception e) { - s_logger.debug(e.getMessage(), e); - } + if (configDao != null) { + try { + ipmiIface = configDao.getValue(Config.BaremetalIpmiLanInterface.key()); + } catch (Exception e) { + s_logger.debug(e.getMessage(), e); + } - try { - ipmiRetryTimes = Integer.parseInt(configDao.getValue(Config.BaremetalIpmiRetryTimes.key())); - } catch (Exception e) { - s_logger.debug(e.getMessage(), e); - } + try { + ipmiRetryTimes = Integer.parseInt(configDao.getValue(Config.BaremetalIpmiRetryTimes.key())); + } catch (Exception e) { + s_logger.debug(e.getMessage(), e); + } - try { - provisionDoneNotificationOn = Boolean.valueOf(configDao.getValue(Config.BaremetalProvisionDoneNotificationEnabled.key())); - isProvisionDoneNotificationTimeout = Integer.parseInt(configDao.getValue(Config.BaremetalProvisionDoneNotificationTimeout.key())); - } catch (Exception e) { - s_logger.debug(e.getMessage(), e); + try { + ipmiRetryDelay = Long.parseLong(configDao.getValue(Config.BaremetalIpmiRetryDelay.key())); + } catch (Exception e) { + s_logger.debug(e.getMessage(), e); + } + + try { + ipmiTimeout = Long.parseLong(configDao.getValue(Config.BaremetalIpmiTimeout.key())); + } catch (Exception e) { + s_logger.debug(e.getMessage(), e); + } + + try { + provisionDoneNotificationOn = Boolean.valueOf(configDao.getValue(Config.BaremetalProvisionDoneNotificationEnabled.key())); + isProvisionDoneNotificationTimeout = Integer.parseInt(configDao.getValue(Config.BaremetalProvisionDoneNotificationTimeout.key())); + } catch (Exception e) { + s_logger.debug(e.getMessage(), e); + } } String injectScript = "scripts/util/ipmi.py"; @@ -198,7 +234,7 @@ public boolean configure(String name, Map params) throws Configu throw new ConfigurationException("Cannot find ping script " + scriptPath); } String pythonPath = "/usr/bin/python"; - _pingCommand = new Script2(pythonPath, s_logger); + _pingCommand = new Script2(pythonPath, ipmiTimeout, s_logger); _pingCommand.add(scriptPath); _pingCommand.add("ping"); _pingCommand.add("interface=" + ipmiIface); @@ -278,19 +314,25 @@ public boolean configure(String name, Map params) throws Configu return true; } - protected boolean doScript(Script cmd) { - return doScript(cmd, null); + @Override + public Type getType() { + return com.cloud.host.Host.Type.Routing; } - protected boolean doScript(Script cmd, int retry) { - return doScript(cmd, null, retry); + @Override + public void disconnected() { + + } + + protected boolean doScript(Script cmd) { + return doScript(cmd, null); } protected boolean doScript(Script cmd, OutputInterpreter interpreter) { - return doScript(cmd, interpreter, ipmiRetryTimes); + return doScript(cmd, interpreter, ipmiRetryTimes, 1); } - protected boolean doScript(Script cmd, OutputInterpreter interpreter, int retry) { + protected boolean doScript(Script cmd, OutputInterpreter interpreter, int retry, long retryDelay) { String res = null; while (retry-- > 0) { if (interpreter == null) { @@ -301,7 +343,7 @@ protected boolean doScript(Script cmd, OutputInterpreter interpreter, int retry) if (res != null && res.startsWith("Error: Unable to establish LAN")) { s_logger.warn("IPMI script timeout(" + cmd.toString() + "), will retry " + retry + " times"); try { - TimeUnit.SECONDS.sleep(1); + TimeUnit.SECONDS.sleep(retryDelay); } catch (InterruptedException e) { s_logger.debug("[ignored] interrupted while waiting to retry running script."); } @@ -317,94 +359,8 @@ protected boolean doScript(Script cmd, OutputInterpreter interpreter, int retry) return false; } - @Override - public boolean start() { - return true; - } - - @Override - public boolean stop() { - return true; - } - - @Override - public Type getType() { - return com.cloud.host.Host.Type.Routing; - } - - protected Map getHostVmStateReport() { - Map states = new HashMap(); - if (hostId != null) { - final List vms = vmDao.listByHostId(hostId); - for (VMInstanceVO vm : vms) { - states.put( - vm.getInstanceName(), - new HostVmStateReportEntry( - vm.getPowerState(), "host-" + hostId - ) - ); - } - } - return states; - } - - @Override - public StartupCommand[] initialize() { - StartupRoutingCommand cmd = new StartupRoutingCommand(0, 0, 0, 0, null, Hypervisor.HypervisorType.BareMetal, - new HashMap()); - - cmd.setDataCenter(_zone); - cmd.setPod(_pod); - cmd.setCluster(_cluster); - cmd.setGuid(_uuid); - cmd.setName(_ip); - cmd.setPrivateIpAddress(_ip); - cmd.setStorageIpAddress(_ip); - cmd.setVersion(BareMetalResourceBase.class.getPackage().getImplementationVersion()); - cmd.setCpus((int) _cpuNum); - cmd.setSpeed(_cpuCapacity); - cmd.setMemory(_memCapacity); - cmd.setPrivateMacAddress(_mac); - cmd.setPublicMacAddress(_mac); - return new StartupCommand[] { cmd }; - } - - private boolean ipmiPing() { - return doScript(_pingCommand); - } - - @Override - public PingCommand getCurrentStatus(long id) { - try { - if (!ipmiPing()) { - Thread.sleep(1000); - if (!ipmiPing()) { - s_logger.warn("Cannot ping ipmi nic " + _ip); - return null; - } - } - } catch (Exception e) { - s_logger.debug("Cannot ping ipmi nic " + _ip, e); - return null; - } - - return new PingRoutingCommand(getType(), id, null); - - /* - if (hostId != null) { - final List vms = vmDao.listByHostId(hostId); - if (vms.isEmpty()) { - return new PingRoutingCommand(getType(), id, null); - } else { - VMInstanceVO vm = vms.get(0); - SecurityGroupHttpClient client = new SecurityGroupHttpClient(); - HashMap> nwGrpStates = client.sync(vm.getInstanceName(), vm.getId(), vm.getPrivateIpAddress()); - return new PingRoutingWithNwGroupsCommand(getType(), id, null, nwGrpStates); - } - } else { - return new PingRoutingCommand(getType(), id, null); - } - */ + protected boolean ipmiPing() { + return doScript(_pingCommand, null, ipmiRetryTimes, ipmiRetryDelay); } protected Answer execute(IpmISetBootDevCommand cmd) { @@ -463,40 +419,16 @@ protected Answer execute(SecurityGroupRulesCmd cmd) { return hc.call(cmd.getGuestIp(), cmd); } - @Override - public Answer executeRequest(Command cmd) { - try { - if (cmd instanceof ReadyCommand) { - return execute((ReadyCommand) cmd); - } else if (cmd instanceof StartCommand) { - return execute((StartCommand) cmd); - } else if (cmd instanceof StopCommand) { - return execute((StopCommand) cmd); - } else if (cmd instanceof RebootCommand) { - return execute((RebootCommand) cmd); - } else if (cmd instanceof IpmISetBootDevCommand) { - return execute((IpmISetBootDevCommand) cmd); - } else if (cmd instanceof MaintainCommand) { - return execute((MaintainCommand) cmd); - } else if (cmd instanceof PrepareForMigrationCommand) { - return execute((PrepareForMigrationCommand) cmd); - } else if (cmd instanceof MigrateCommand) { - return execute((MigrateCommand) cmd); - } else if (cmd instanceof CheckVirtualMachineCommand) { - return execute((CheckVirtualMachineCommand) cmd); - } else if (cmd instanceof IpmiBootorResetCommand) { - return execute((IpmiBootorResetCommand) cmd); - } else if (cmd instanceof SecurityGroupRulesCmd) { - return execute((SecurityGroupRulesCmd) cmd); - } else if (cmd instanceof CheckNetworkCommand) { - return execute((CheckNetworkCommand) cmd); - } else { - return Answer.createUnsupportedCommandAnswer(cmd); - } - } catch (Throwable t) { - s_logger.debug(t.getMessage(), t); - return new Answer(cmd, false, t.getMessage()); - } + protected Answer execute(DestroyCommand cmd) { + return new Answer(cmd, true, "Success"); + } + + protected PlugNicAnswer execute(PlugNicCommand cmd) { + return new PlugNicAnswer(cmd, false, "Adding NIC not suppored"); + } + + protected UnPlugNicAnswer execute(UnPlugNicCommand cmd) { + return new UnPlugNicAnswer(cmd, false, "Adding NIC not suppored"); } protected boolean isPowerOn(String str) { @@ -512,10 +444,10 @@ protected boolean isPowerOn(String str) { protected RebootAnswer execute(final RebootCommand cmd) { String infoStr = "Command not supported in present state"; OutputInterpreter.AllLinesParser interpreter = new OutputInterpreter.AllLinesParser(); - if (!doScript(_rebootCommand, interpreter, 10)) { + if (!doScript(_rebootCommand, interpreter, 10, 1)) { if (interpreter.getLines().contains(infoStr)) { // try again, this error should be temporary - if (!doScript(_rebootCommand, interpreter, 10)) { + if (!doScript(_rebootCommand, interpreter, 10, 1)) { return new RebootAnswer(cmd, "IPMI reboot failed", false); } } else { @@ -632,11 +564,6 @@ protected ReadyAnswer execute(ReadyCommand cmd) { return new ReadyAnswer(cmd); } - @Override - public void disconnected() { - - } - @Override public IAgentControl getAgentControl() { return _agentControl; @@ -647,4 +574,60 @@ public void setAgentControl(IAgentControl agentControl) { _agentControl = agentControl; } + @Override + public Answer executeRequest(Command cmd) { + try { + if (cmd instanceof ReadyCommand) { + return execute((ReadyCommand) cmd); + } else if (cmd instanceof StartCommand) { + return execute((StartCommand) cmd); + } else if (cmd instanceof StopCommand) { + return execute((StopCommand) cmd); + } else if (cmd instanceof RebootCommand) { + return execute((RebootCommand) cmd); + } else if (cmd instanceof IpmISetBootDevCommand) { + return execute((IpmISetBootDevCommand) cmd); + } else if (cmd instanceof MaintainCommand) { + return execute((MaintainCommand) cmd); + } else if (cmd instanceof PrepareForMigrationCommand) { + return execute((PrepareForMigrationCommand) cmd); + } else if (cmd instanceof MigrateCommand) { + return execute((MigrateCommand) cmd); + } else if (cmd instanceof CheckVirtualMachineCommand) { + return execute((CheckVirtualMachineCommand) cmd); + } else if (cmd instanceof IpmiBootorResetCommand) { + return execute((IpmiBootorResetCommand) cmd); + } else if (cmd instanceof SecurityGroupRulesCmd) { + return execute((SecurityGroupRulesCmd) cmd); + } else if (cmd instanceof CheckNetworkCommand) { + return execute((CheckNetworkCommand) cmd); + } else if (cmd instanceof DestroyCommand) { + return execute((DestroyCommand) cmd); + } else if (cmd instanceof PlugNicCommand) { + return execute((PlugNicCommand) cmd); + } else if (cmd instanceof UnPlugNicCommand) { + return execute((UnPlugNicCommand) cmd); + } else { + return Answer.createUnsupportedCommandAnswer(cmd); + } + } catch (Throwable t) { + s_logger.debug(t.getMessage(), t); + return new Answer(cmd, false, t.getMessage()); + } + } + + @Override + public PingCommand getCurrentStatus(long id) { + try { + if (!ipmiPing()) { + s_logger.warn("Cannot ping ipmi nic " + _ip); + return null; + } + } catch (Exception e) { + s_logger.debug("Cannot ping ipmi nic " + _ip, e); + return null; + } + + return new PingRoutingCommand(getType(), id, null); + } } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java index 71e7ae766dc0..62fdf553edea 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java @@ -162,6 +162,7 @@ protected PreparePxeServerAnswer execute(PreparePxeServerCommand cmd) { if (!SSHCmdHelper.sshExecuteCmd(sshConnection, script)) { return new PreparePxeServerAnswer(cmd, "prepare PING at " + _ip + " failed, command:" + script); } + s_logger.debug("Prepare Ping PXE server successfully"); return new PreparePxeServerAnswer(cmd); @@ -190,6 +191,7 @@ protected Answer execute(PrepareCreateTemplateCommand cmd) { if (!SSHCmdHelper.sshExecuteCmd(sshConnection, script)) { return new Answer(cmd, false, "prepare for creating template failed, command:" + script); } + s_logger.debug("Prepare for creating template successfully"); return new Answer(cmd, true, "Success"); diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeElement.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeElement.java index 17ec90210163..724bed768955 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeElement.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeElement.java @@ -20,6 +20,7 @@ import com.cloud.baremetal.database.BaremetalPxeVO; import com.cloud.baremetal.manager.BaremetalVlanManager; +import com.cloud.baremetal.manager.VlanType; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterVO; import com.cloud.dc.Pod; @@ -28,12 +29,15 @@ import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.ResourceUnavailableException; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.network.Network; import com.cloud.network.Network.Capability; import com.cloud.network.Network.GuestType; import com.cloud.network.Network.Provider; import com.cloud.network.Network.Service; +import com.cloud.network.Networks; import com.cloud.network.Networks.TrafficType; import com.cloud.network.PhysicalNetworkServiceProvider; import com.cloud.network.element.NetworkElement; @@ -72,6 +76,8 @@ public class BaremetalPxeElement extends AdapterBase implements NetworkElement { BaremetalVlanManager vlanMgr; @Inject DataCenterDao zoneDao; + @Inject + HostDao hostDao; static { Capability cap = new Capability(BaremetalPxeManager.BAREMETAL_PXE_CAPABILITY); @@ -146,7 +152,10 @@ public boolean prepare(Network network, NicProfile nic, VirtualMachineProfile vm } private void prepareVlan(Network network, DeployDestination dest) { - vlanMgr.prepareVlan(network, dest); + + String macAddress = dest.getHost().getPrivateMacAddress(); + int vlan = Integer.parseInt(Networks.BroadcastDomainType.getValue(network.getBroadcastUri())); + vlanMgr.prepareVlan(vlan, macAddress, VlanType.UNTAGGED); } @Override @@ -164,7 +173,11 @@ public boolean release(Network network, NicProfile nic, VirtualMachineProfile vm } private void releaseVlan(Network network, VirtualMachineProfile vm) { - vlanMgr.releaseVlan(network, vm); + + HostVO host = hostDao.findById(vm.getVirtualMachine().getHostId()); + int vlan = Integer.parseInt(Networks.BroadcastDomainType.getValue(network.getBroadcastUri())); + + vlanMgr.releaseVlan(vlan, host.getPrivateMacAddress(), VlanType.UNTAGGED); } @Override diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalVlanStruct.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalVlanStruct.java index 32d9b33a3448..cc060de0f989 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalVlanStruct.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalVlanStruct.java @@ -22,6 +22,8 @@ // Automatically generated by addcopyright.py at 04/03/2012 package com.cloud.baremetal.networkservice; +import com.cloud.baremetal.manager.VlanType; + /** * Created by frank on 9/2/14. */ @@ -33,6 +35,8 @@ public class BaremetalVlanStruct { private String hostMac; private String port; private int vlan; + private VlanType type; + private boolean removeAll; public String getSwitchType() { return switchType; @@ -89,4 +93,20 @@ public int getVlan() { public void setVlan(int vlan) { this.vlan = vlan; } + + public void setVlanType(VlanType type){ + this.type = type; + } + + public VlanType getVlanType(){ + return type; + } + + public boolean isRemoveAll() { + return removeAll; + } + + public void setRemoveAll(boolean removeAll) { + this.removeAll = removeAll; + } } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BrocadeFastIronBaremetalSwitchBackend.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BrocadeFastIronBaremetalSwitchBackend.java new file mode 100644 index 000000000000..ebcc88ba0883 --- /dev/null +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BrocadeFastIronBaremetalSwitchBackend.java @@ -0,0 +1,204 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +package com.cloud.baremetal.networkservice; + +import com.cloud.baremetal.manager.VlanType; +import com.cloud.utils.exception.CloudRuntimeException; +import com.jcraft.jsch.Channel; +import com.jcraft.jsch.JSch; +import com.jcraft.jsch.JSchException; +import com.jcraft.jsch.Session; +import org.apache.log4j.Logger; + +import java.io.IOException; +import java.io.InputStream; +import java.net.UnknownHostException; + +public class BrocadeFastIronBaremetalSwitchBackend implements BaremetalSwitchBackend { + + private static final Logger s_logger = Logger.getLogger(BrocadeFastIronBaremetalSwitchBackend.class); + public static final String TYPE = "Brocade"; + + @Override + public String getSwitchBackendType() { + return TYPE; + } + + @Override + public void prepareVlan(BaremetalVlanStruct struct) { + try { + BrocadeManager bm = new BrocadeManager(struct.getSwitchIp(), struct.getSwitchUsername(), struct.getSwitchPassword()); + bm.assignVlanToPort(struct.getPort(), struct.getVlan(), struct.getVlanType()); + } catch (InterruptedException | JSchException | IOException e) { + s_logger.warn("Error assigning VLAN to PORT", e); + throw new CloudRuntimeException(e); + } + } + + @Override + public void removePortFromVlan(BaremetalVlanStruct struct) { + try { + BrocadeManager bm = new BrocadeManager(struct.getSwitchIp(), struct.getSwitchUsername(), struct.getSwitchPassword()); + bm.removePortFromVlan(struct.getPort(), struct.getVlan(), struct.getVlanType()); + } catch (InterruptedException | JSchException | IOException e) { + s_logger.warn("Error removing VLAN", e); + throw new CloudRuntimeException(e); + } + } + + private class BrocadeManager { + String user; + String password; + String ip; + int port; + + public BrocadeManager(String ip, String user, String password) throws UnknownHostException { + this.user = user; + this.password = password; + this.ip = ip; + this.port = 22; + + } + + public void assignVlanToPort(String port, int vlanId, VlanType vlanType) throws IOException, JSchException, InterruptedException { + + String[] dualModeCmds = { + "en\n", + this.password + "\n", + "config t\n", + "int e " + port + "\n", + "dual-mode " + Integer.toString(vlanId) + "\n", + "end\n", + "exit\n", + "exit\n" + }; + + String[] tagCommands = { + "en\n", + this.password + "\n", + "config t\n", + "vlan " + Integer.toString(vlanId) + "\n", + "tagged e " + port + "\n", + "end\n", + "exit\n", + "exit\n" + }; + + executeCommands(tagCommands); + + //If it is a untagged VLAN, change the interface to dual mode and add it as a default VLAN + if (vlanType.equals(VlanType.UNTAGGED)) { + executeCommands(dualModeCmds); + } + + // TODO: Check if vlan assignement was successful + } + + public void removePortFromVlan(String port, int vlanId, VlanType vlanType) throws JSchException, InterruptedException { + + String[] dualModeCmds = { + "en\n", + this.password + "\n", + "config t\n", + "int e " + port + "\n", + "no dual-mode " + Integer.toString(vlanId) + "\n", + "end\n", + "exit\n", + "exit\n" + }; + + String[] untagCmds = { + "en\n", + this.password + "\n", + "config t\n", + "vlan " + Integer.toString(vlanId) + "\n", + "no tagged " + " e " + port + "\n", + "end\n", + "exit\n", + "exit\n" + }; + + if(vlanType.equals(VlanType.UNTAGGED)){ + executeCommands(dualModeCmds); + } + executeCommands(untagCmds); + + // TODO: Check if vlan removal was successful + } + + private void executeCommands(String[] cmds) throws JSchException, InterruptedException { + + CommandInputStream cs = new CommandInputStream(cmds); + + JSch jsch=new JSch(); + Session session=jsch.getSession(user, ip, port); + session.setPassword(password); + session.setConfig("StrictHostKeyChecking", "no"); + + session.connect(300000); + + Channel channel = session.openChannel("shell"); + channel.setInputStream(cs); + channel.connect(3 * 300000); + + while (!channel.isClosed()){ + Thread.sleep(1000); + } + } + } + + private class CommandInputStream extends InputStream { + + private final String[] cmds; + private int curCmd; + private int curIdx; + + CommandInputStream(String[] cmds) { + this.cmds = cmds; + this.curCmd = 0; + this.curIdx = 0; + } + @Override + public int read() throws IOException { + + if (curCmd >= cmds.length) + return -1; + + + String cmd = cmds[curCmd]; + + char ch = cmd.charAt(curIdx); + curIdx += 1; + + if (ch == '\n'){ + + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + curCmd++; + curIdx = 0; + s_logger.info("[BrocadeSwitchCmd] " + cmd); + } + + return (int)ch; + } + } +} diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/JuniperBaremetalSwitchBackend.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/JuniperBaremetalSwitchBackend.java new file mode 100644 index 000000000000..cc8c7b0addc9 --- /dev/null +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/JuniperBaremetalSwitchBackend.java @@ -0,0 +1,228 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Automatically generated by addcopyright.py at 01/29/2013 +// Apache License, Version 2.0 (the "License"); you may not use this +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + +package com.cloud.baremetal.networkservice; + +import com.cloud.baremetal.manager.BaremetalManagerImpl; +import com.cloud.baremetal.manager.VlanType; +import com.cloud.utils.exception.CloudRuntimeException; +import net.juniper.netconf.Device; +import net.juniper.netconf.NetconfException; +import net.juniper.netconf.XML; +import net.juniper.netconf.XMLBuilder; +import org.apache.log4j.Logger; +import org.w3c.dom.Document; +import org.w3c.dom.Node; +import org.w3c.dom.NodeList; +import org.xml.sax.SAXException; + +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.xpath.XPath; +import javax.xml.xpath.XPathConstants; +import javax.xml.xpath.XPathExpression; +import javax.xml.xpath.XPathExpressionException; +import javax.xml.xpath.XPathFactory; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class JuniperBaremetalSwitchBackend implements BaremetalSwitchBackend { + + private static final Logger s_logger = Logger.getLogger(JuniperBaremetalSwitchBackend.class); + public static final String TYPE = "Juniper"; + public static final int NETCONF_PORT = 22; + + @Override + public String getSwitchBackendType() { + return TYPE; + } + + @Override + public synchronized void prepareVlan(BaremetalVlanStruct struct) { + JuniperDevice juniper = null; + CloudRuntimeException cloudRuntimeException = null; + try { + juniper = new JuniperDevice(struct.getSwitchIp(), NETCONF_PORT, struct.getSwitchUsername(), struct.getSwitchPassword()); + juniper.addVlanToInterface(struct.getPort(), struct.getVlan(), struct.getVlanType()); + } catch (ParserConfigurationException e) { + String mesg = "Invalid configuration to initiate netconf session to the backend switch"; + s_logger.error(mesg, e); + cloudRuntimeException = new CloudRuntimeException(mesg, e); + } catch (SAXException | IOException | XPathExpressionException e) { + String mesg = "Unable to add VLAN to Port"; + s_logger.error(mesg, e); + cloudRuntimeException = new CloudRuntimeException(mesg, e); + } + closeConnection(juniper, cloudRuntimeException); + } + + @Override + public synchronized void removePortFromVlan(BaremetalVlanStruct struct) { + JuniperDevice juniper = null; + CloudRuntimeException cloudRuntimeException = null; + try { + juniper = new JuniperDevice(struct.getSwitchIp(), NETCONF_PORT, struct.getSwitchUsername(), struct.getSwitchPassword()); + if (struct.isRemoveAll()) { + juniper.clearAllVlansFromInterface(struct.getPort()); + } else { + juniper.removeVlanFromInterface(struct.getPort(), struct.getVlan(), struct.getVlanType()); + } + } catch (ParserConfigurationException e) { + String mesg = "Invalid configuration to initiate netconf session to the backend switch"; + s_logger.error(mesg, e); + cloudRuntimeException = new CloudRuntimeException(mesg, e); + } catch (SAXException | IOException e) { + String mesg = String.format("Unable to remove VLAN %d from Port: %s, type : %s", struct.getVlan(), struct.getPort(), struct.getVlanType()); + s_logger.error(mesg, e); + cloudRuntimeException = new CloudRuntimeException(mesg, e); + } catch (XPathExpressionException e) { + e.printStackTrace(); + } + closeConnection(juniper, cloudRuntimeException); + } + + private void closeConnection(JuniperDevice juniper, CloudRuntimeException cloudRuntimeException) { + if(juniper != null) { + juniper.close(); + } + if(cloudRuntimeException != null) { + throw cloudRuntimeException; + } + } + + public class JuniperDevice { + Device device; + + public JuniperDevice(String host, int port, String user, String password) throws ParserConfigurationException, NetconfException { + device = new Device(host, user, password, null, port); + try { + device.connect(); + } catch (Exception e) { + s_logger.error("Error while connecting to the switch", e); + throw e; + } + } + + protected void close() { + device.close(); + } + + public void addVlanToInterface(String interfaceName, int vlanId, VlanType vlanType) throws IOException, SAXException, XPathExpressionException, ParserConfigurationException { + String configTemplate = "set interfaces %s unit 0 family ethernet-switching vlan members %d\n" + + "set interfaces %s unit 0 family ethernet-switching interface-mode trunk\n"; + + if(vlanId == BaremetalManagerImpl.pxeVlan.value()) { + configTemplate += String.format("set protocols lldp interface %s enable\n", interfaceName); + } else { + configTemplate += String.format("delete protocols lldp interface %s\n", interfaceName); + } + + if (vlanType.equals(VlanType.UNTAGGED)) { + configTemplate += String.format("set interfaces %s native-vlan-id %d", interfaceName, vlanId); + } + + String config = String.format(configTemplate, interfaceName, vlanId, interfaceName); + + loadAndCommitConfigs(config); + } + + public void removeVlanFromInterface(String interfaceName, int vlanId, VlanType vlanType) throws SAXException, IOException { + String config = ""; + + if (vlanType.equals(VlanType.UNTAGGED)) { + config += String.format("delete interfaces %s native-vlan-id\n", interfaceName); + } + + config += String.format("delete interfaces %s unit 0 family ethernet-switching vlan members %d\n", interfaceName, vlanId); + + this.device.loadSetConfiguration(config); + + XML candidateConfig = this.device.getCandidateConfig("" + interfaceName + ""); + + if (!candidateConfig.toString().contains("")) { + config += String.format("delete interfaces %s unit 0 family ethernet-switching", interfaceName); + } + + loadAndCommitConfigs(config); + } + + void clearAllVlansFromInterface(String interfaceName) throws IOException, SAXException, XPathExpressionException, ParserConfigurationException { + String config = String.format("delete interfaces %s native-vlan-id\n", interfaceName); + + for (int vl : this.getInterfaceVlans(interfaceName)) { + if (vl > 1) { + config += String.format("delete interfaces %s unit 0 family ethernet-switching vlan members %d\n", interfaceName, vl); + } + } + + config += String.format("delete interfaces %s unit 0 family ethernet-switching", interfaceName); + + loadAndCommitConfigs(config); + } + + private void loadAndCommitConfigs(String config) throws IOException, SAXException { + this.device.loadSetConfiguration(config); + + try { + this.device.commit(); + } catch (Exception e) { + if(device != null) { + s_logger.error(this.device.getLastRPCReply()); + } + throw e; + } + } + + private List getInterfaceVlans(String interfaceName) throws ParserConfigurationException, XPathExpressionException { + List interfaceVlans = new ArrayList<>(); + + XMLBuilder rpcBuilder = new XMLBuilder(); + XML vlanQuery = rpcBuilder.createNewRPC("get-ethernet-switching-interface-information").append("interface-name", interfaceName + ".0"); + XML out = getConfig(vlanQuery.toString()); + + assert out != null; + + Document doc = out.getOwnerDocument(); + XPathFactory xPathfactory = XPathFactory.newInstance(); + XPath xpath = xPathfactory.newXPath(); + XPathExpression expr = xpath.compile("//l2iff-interface-vlan-id"); + + NodeList nl = (NodeList) expr.evaluate(doc, XPathConstants.NODESET); + for (int i =0; i DEFAULT_TIMEOUT_SEC ? timeoutSec : DEFAULT_TIMEOUT_SEC; + + } + + private void signRequest(HttpRequest request) { + + long timestamp = System.currentTimeMillis() / 1000; + Map oauthParams = new HashMap(); + + //oauthParams.put("realm", ""); + oauthParams.put("oauth_version", "1.0"); + oauthParams.put("oauth_signature_method", "PLAINTEXT"); + + oauthParams.put("oauth_nonce", UUID.randomUUID().toString().replaceAll("-", "")); + oauthParams.put("oauth_timestamp", Long.toString(timestamp)); + + oauthParams.put("oauth_consumer_key", conn.getConsumerKey()); + oauthParams.put("oauth_token", conn.getKey()); + + String signature = ""; + try { + signature = "&" + URLEncoder.encode(conn.getSecret(), ENCODING_UTF8); + + oauthParams.put("oauth_signature", signature); + + String oauthHeaderValue = buildOauthHeader(oauthParams); + + request.setHeader(HTTP_HEADER_AUTHORIZATION, oauthHeaderValue); + } catch (UnsupportedEncodingException e) { + s_logger.warn(e.getMessage()); + throw new CloudRuntimeException("Unable to sign request " + e.getMessage()); + } + } + + private static String buildOauthHeader(Map oauthParams) throws UnsupportedEncodingException { + + StringBuilder header = new StringBuilder(); + header.append("OAuth "); + header.append(" realm=\"\", "); + + for (Map.Entry entry : oauthParams.entrySet()) { + header.append(String.format("%s=\"%s\", ", entry.getKey(), URLEncoder.encode(entry.getValue(), ENCODING_UTF8))); + } + + int len = header.length(); + header.delete(len - 2, len - 1); + + return header.toString(); + } + + public String executeApiRequest(HttpRequest request) throws IOException { + + CloseableHttpClient httpclient = HttpClientBuilder.create().build(); + String response = null; + + if (null == httpclient) { + throw new RuntimeException("Unable to create httpClient for request"); + } + + try { + if (request.getFirstHeader(HEADER_CONTENT_TYPE) == null) { + request.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_JSON); + } + request.setHeader(HEADER_ACCEPT, HEADER_VALUE_JSON); + request.setHeader(HEADER_ACCEPT_ENCODING, HEADER_VALUE_TEXT_PLAIN); + + signRequest(request); + + HttpHost target = new HttpHost(conn.getIp(), conn.getPort(), conn.getScheme()); + + HttpResponse httpResponse = httpclient.execute(target, request); + + HttpEntity entity = httpResponse.getEntity(); + StatusLine status = httpResponse.getStatusLine(); + + if (status.getStatusCode() != HttpStatus.SC_NO_CONTENT) { + response = EntityUtils.toString(entity); + + assert response != null; + + if (status.getStatusCode() >= HttpStatus.SC_BAD_REQUEST) { + // check if this is an error + String errMesg = "Error: Non successful response: " + request.getRequestLine() + response; + s_logger.warn(errMesg); + throw new CloudRuntimeException(errMesg); + } + } + } catch (IOException e) { + String errMesg = "Error while trying to get HTTP object: " + request.getRequestLine(); + s_logger.warn(errMesg, e); + throw new CloudRuntimeException("Error while sending request. Error " + e.getMessage()); + } + + return response; + } + + public MaasObject.MaasNode addMachine(MaasObject.AddMachineParameters addMachineParameters) throws IOException { + + HttpPost addMachineReq = new HttpPost(getApiUrl("machines")); + addMachineReq.setEntity(new StringEntity(gson.toJson(addMachineParameters))); + + List params = new ArrayList<>(); + params.add(new BasicNameValuePair("architecture", addMachineParameters.getArch())); + params.add(new BasicNameValuePair("power_type", addMachineParameters.getPowerType())); + params.add(new BasicNameValuePair("mac_addresses", addMachineParameters.getMacAddress())); + params.add(new BasicNameValuePair("power_parameters_power_user", addMachineParameters.getPowerUser())); + params.add(new BasicNameValuePair("power_parameters_power_pass", addMachineParameters.getPowerPassword())); + params.add(new BasicNameValuePair("power_parameters_power_address", addMachineParameters.getPowerAddress())); + addMachineReq.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8)); + addMachineReq.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM); + + String response = executeApiRequest(addMachineReq); + + MaasObject.MaasNode node = gson.fromJson(response, MaasObject.MaasNode.class); + + return waitTillReady(node.systemId); + } + + public boolean deleteMachine(String systemId) throws IOException { + + HttpDelete deleteMachineReq = new HttpDelete(getApiUrl("machines", systemId)); + + executeApiRequest(deleteMachineReq); + + s_logger.info("deleted MAAS machine"); + + return true; + } + + public void allocateMachine(MaasObject.AllocateMachineParameters allocateMachineParameters) throws IOException { + + String url = addOperationToApiUrl(getApiUrl("machines"), "allocate"); + HttpPost allocateReq = new HttpPost(url); + + List params = new ArrayList<>(); + params.add(new BasicNameValuePair("system_id", allocateMachineParameters.getSystemId())); + allocateReq.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8)); + allocateReq.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM); + + executeApiRequest(allocateReq); + } + + public void addTagToMachine(String systemId, String tagName) throws IOException { + createTagIfNotExist(tagName); + modifyTagsOnMachine(systemId, "add", tagName); + } + + public void removeTagFromMachine(String systemId, String tagName) throws IOException { + modifyTagsOnMachine(systemId, "remove", tagName); + deleteTagIfNotUsed(tagName, "machines"); + } + + private void createTagIfNotExist(String tagName) throws IOException { + try { + // trying to see if tag exists or not + HttpGet req = new HttpGet(getApiUrl("tags", tagName)); + executeApiRequest(req); + } catch (Exception e) { + // tag does not exist on MaaS server, create it now + HttpPost req = new HttpPost(getApiUrl("tags")); + + List params = new ArrayList<>(); + params.add(new BasicNameValuePair("name", tagName)); + req.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8)); + req.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM); + + executeApiRequest(req); + } + } + + private void deleteTagIfNotUsed(String tagName, String target) throws IOException { + // trying to see if tag is being used on any target + String response = executeApiRequest(new HttpGet(addOperationToApiUrl(getApiUrl("tags", tagName), target))); + + List nodes = gson.fromJson(response, new TypeToken>(){}.getType()); + + if (nodes.size() == 0) { + // delete tag + executeApiRequest(new HttpDelete(getApiUrl("tags", tagName))); + } + } + + private void modifyTagsOnMachine(String systemId, String action, String tagName) throws UnsupportedEncodingException, IOException { + if (action.equals("remove")) { + try { + // trying to see if tag exists or not + HttpGet req = new HttpGet(getApiUrl("tags", tagName)); + executeApiRequest(req); + } catch (Exception e) { + // do not try to delete a tag from a machine if the tag doesn't exist! + return; + } + } + + String url = addOperationToApiUrl(getApiUrl("tags", tagName), "update_nodes"); + HttpPost req = new HttpPost(url); + + List params = new ArrayList<>(); + params.add(new BasicNameValuePair(action, systemId)); + req.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8)); + req.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM); + + executeApiRequest(req); + } + + public MaasObject.MaasNode deployMachine(String systemId, MaasObject.DeployMachineParameters deployMachineParameters) throws IOException { + + String url = addOperationToApiUrl(getApiUrl("machines", systemId), "deploy"); + HttpPost deployMachineReq = new HttpPost(url); + + List params = new ArrayList<>(); + params.add(new BasicNameValuePair("distro_series", deployMachineParameters.getDistroSeries())); + deployMachineReq.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8)); + deployMachineReq.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM); + + executeApiRequest(deployMachineReq); + + return waitTillDeployed(systemId); + } + + public MaasObject.MaasNode releaseMachine(String systemId, boolean eraseDisk, boolean fullErase) throws IOException { + + String url = addOperationToApiUrl(getApiUrl("machines", systemId), "release"); + HttpPost releaseMachineReq = new HttpPost(url); + + List params = new ArrayList<>(); + params.add(new BasicNameValuePair("erase", Boolean.toString(eraseDisk))); + params.add(new BasicNameValuePair("quick_erase", Boolean.toString(!fullErase))); + releaseMachineReq.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8)); + releaseMachineReq.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM); + + executeApiRequest(releaseMachineReq); + + return waitTillReady(systemId); + } + + public MaasObject.MaasNode getMaasNode(String systemId) throws IOException { + + HttpGet maasNode = new HttpGet(getApiUrl("machines", systemId)); + String response = executeApiRequest(maasNode); + + return gson.fromJson(response, MaasObject.MaasNode.class); + } + + public MaasObject.MaasNode getMaasNodeByMac(String macAddress) throws IOException { + + for (MaasObject.MaasNode node : getMaasNodes()) { + if (node.bootInterface.macAddress.equals(macAddress.toLowerCase())) { + return node; + } + } + + return null; + } + + public List getMaasNodes() throws IOException { + return getMaasNodes(null); + } + + public List getMaasNodes(String pool) throws IOException { + String url = getApiUrl("machines"); + + if (StringUtils.isNotEmpty(pool)) { + url += "?pool=" + pool; + } + + HttpGet maasNodeReq = new HttpGet(url); + + String response = executeApiRequest(maasNodeReq); + + Type listType = new TypeToken>(){}.getType(); + return gson.fromJson(response, listType); + } + + public MaasObject.MaasNode waitTillReady(String systemId) throws IOException { + + int to = this.timeout; + MaasObject.MaasNode maasNode = null; + do { + maasNode = getMaasNode(systemId); + try { + Thread.sleep(POLL_TIMEOUT_SEC*1000); + } catch (InterruptedException e) { + return null; + } + to -= POLL_TIMEOUT_SEC; + } while ((maasNode != null && !maasNode.statusName.equals(MaasObject.MaasState.Ready.toString())) && to>0); + + if (maasNode == null || (!maasNode.statusName.equals(MaasObject.MaasState.Ready.toString()))) { + throw new CloudRuntimeException("Operation Timed out: Unable to add node to MAAS with SystemID " + systemId); + } + + return maasNode; + } + + private MaasObject.MaasNode waitTillDeployed(String systemId) throws IOException { + + int to = this.timeout; + MaasObject.MaasNode maasNode = null; + do { + maasNode = getMaasNode(systemId); + try { + Thread.sleep(POLL_TIMEOUT_SEC*1000); + } catch (InterruptedException e) { + return null; + } + to-=POLL_TIMEOUT_SEC; + } while ((maasNode != null && !maasNode.statusName.equals(MaasObject.MaasState.Deployed.toString())) && to>0); + + if (maasNode == null || (!maasNode.statusName.equals(MaasObject.MaasState.Deployed.toString()))) { + throw new CloudRuntimeException("Unable to deploy node to MAAS with SystemID " + systemId); + } + + return maasNode; + } + + public void setInterface(String systemId, int interfaceId, Integer linkId, Integer subnetId, boolean enableDhcp) throws IOException { + String url; + List params; + + if (linkId != null) { + url = addOperationToApiUrl( + getApiUrl("nodes", systemId, "interfaces", Integer.toString(interfaceId)), + "unlink_subnet" + ); + + HttpPost unlinkReq = new HttpPost(url); + params = new ArrayList<>(); + params.add(new BasicNameValuePair("id", Integer.toString(linkId))); + unlinkReq.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8)); + unlinkReq.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM); + executeApiRequest(unlinkReq); + } + + url = addOperationToApiUrl( + getApiUrl("nodes", systemId, "interfaces", Integer.toString(interfaceId)), + "link_subnet" + ); + + HttpPost linkReq = new HttpPost(url); + params = new ArrayList<>(); + params.add(new BasicNameValuePair("subnet", Integer.toString(subnetId))); + params.add(new BasicNameValuePair("mode", enableDhcp ? MODE_DHCP : MODE_LINK_UP)); + params.add(new BasicNameValuePair("force", "True")); + linkReq.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8)); + linkReq.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM); + executeApiRequest(linkReq); + } + + public MaasObject.MaasSubnet getDhcpSubnet() throws IOException { + HttpGet subnetReq = new HttpGet(getApiUrl("subnets")); + String response = executeApiRequest(subnetReq); + + Type listType = new TypeToken>(){}.getType(); + List subnets = gson.fromJson(response, listType); + + for (MaasObject.MaasSubnet subnet : subnets) { + if(subnet.vlan.dhcpOn){ + return subnet; + } + } + return null; + } + + public MaasObject.MaasInterface createBondInterface(String systemId, List phyInterfaceIds) throws IOException { + String url = addOperationToApiUrl(getApiUrl("nodes", systemId, "interfaces"), "create_bond"); + HttpPost createBondReq = new HttpPost(url); + + List params = new ArrayList<>(); + params.add(new BasicNameValuePair("system_id", systemId)); + params.add(new BasicNameValuePair("name", "bond0")); + for (Integer phyId : phyInterfaceIds) { + params.add(new BasicNameValuePair("parents", Integer.toString(phyId))); + } + + createBondReq.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8)); + createBondReq.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM); + + String resp = executeApiRequest(createBondReq); + + return gson.fromJson(resp, MaasObject.MaasInterface.class); + } + + public void updateInterfaceMac(String systemId, int interfaceId, String mac) throws IOException { + String url = getApiUrl("nodes", systemId, "interfaces", Integer.toString(interfaceId)); + List params = new ArrayList<>(); + params.add(new BasicNameValuePair("mac_address", mac)); + HttpPut updateMacReq = new HttpPut(url); + + updateMacReq.setEntity(new UrlEncodedFormEntity(params, ENCODING_UTF8)); + updateMacReq.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_FORM); + + executeApiRequest(updateMacReq); + s_logger.debug("updated interface mac on " + systemId + " to " + mac); + } + + public void updateHostname(String systemId, String newHostName) throws IOException { + String url = getApiUrl("machines", systemId); + HttpPut updateHostnameReq = new HttpPut(url); + MaasObject.UpdateHostnameParams params = new MaasObject.UpdateHostnameParams(newHostName); + updateHostnameReq.setEntity(new StringEntity(gson.toJson(params))); + + executeApiRequest(updateHostnameReq); + + } + + private List getRackControllers() throws IOException { + String url = getApiUrl("rackcontrollers"); + HttpGet req = new HttpGet(url); + String resp = executeApiRequest(req); + + Type listType = new TypeToken>(){}.getType(); + return gson.fromJson(resp, listType); + } + + public List listImages() throws IOException { + List rc = getRackControllers(); + if (rc != null && rc.size() > 0) { + //pick the first Rack Controller for now + String rcSystemId = rc.get(0).systemId; + String url = addOperationToApiUrl(getApiUrl("rackcontrollers", rcSystemId), "list_boot_images"); + HttpGet listImgReq = new HttpGet(url); + + String resp = executeApiRequest(listImgReq); + MaasObject.ListImagesResponse imgResp = gson.fromJson(resp, MaasObject.ListImagesResponse.class); + return imgResp.images; + } + return null; + } + + private String getApiUrl(String... args) { + + ArrayList urlList = new ArrayList(Arrays.asList(args)); + + urlList.add(0, API_PREFIX); + urlList.add(urlList.size(), ""); + return StringUtils.join(urlList, "/"); + + } + + private String addOperationToApiUrl(String url, String op) { + return url + "?op=" + op; + } +} diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasHostListner.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasHostListner.java new file mode 100644 index 000000000000..8255a839628e --- /dev/null +++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasHostListner.java @@ -0,0 +1,89 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.compute.maas; + +import com.cloud.agent.Listener; +import com.cloud.agent.api.AgentControlAnswer; +import com.cloud.agent.api.AgentControlCommand; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.StartupCommand; +import com.cloud.exception.ConnectionException; +import com.cloud.host.Host; +import com.cloud.host.Status; + +public class MaasHostListner implements Listener { + MaasResourceProvider maasResource; + + public MaasHostListner(MaasResourceProvider maasResource) { + this.maasResource = maasResource; + } + @Override + public boolean processAnswers(long agentId, long seq, Answer[] answers) { + return false; + } + + @Override + public boolean processCommands(long agentId, long seq, Command[] commands) { + return false; + } + + @Override + public AgentControlAnswer processControlCommand(long agentId, AgentControlCommand cmd) { + return null; + } + + @Override + public void processHostAdded(long hostId) { + maasResource.updateHostAddedDetails(hostId); + } + + @Override + public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException { + + } + + @Override + public boolean processDisconnect(long agentId, Status state) { + return false; + } + + @Override + public void processHostAboutToBeRemoved(long hostId) { + + } + + @Override + public void processHostRemoved(long hostId, long clusterId) { + + } + + @Override + public boolean isRecurring() { + return false; + } + + @Override + public int getTimeout() { + return 0; + } + + @Override + public boolean processTimeout(long agentId, long seq) { + return false; + } +} diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasManager.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasManager.java new file mode 100644 index 000000000000..8b8acd2c00c6 --- /dev/null +++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasManager.java @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Automatically generated by addcopyright.py at 01/29/2013 +// Apache License, Version 2.0 (the "License"); you may not use this +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// Automatically generated by addcopyright.py at 04/03/2012 +package org.apache.cloudstack.compute.maas; + +import java.io.IOException; +import java.util.List; + +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.compute.maas.api.ListMaasServiceOfferingsCmd; + +import com.cloud.utils.component.Manager; +import com.cloud.utils.component.PluggableService; + +public interface MaasManager extends PluggableService, Manager { + + MaasApiClient getMaasApiClient(long clusterId) throws ConfigurationException; + + List listMaasServiceOfferings(ListMaasServiceOfferingsCmd cmd) throws ConfigurationException, IOException; +} diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasManagerImpl.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasManagerImpl.java new file mode 100644 index 000000000000..fac8a203f208 --- /dev/null +++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasManagerImpl.java @@ -0,0 +1,287 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Automatically generated by addcopyright.py at 01/29/2013 +// Apache License, Version 2.0 (the "License"); you may not use this +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// Automatically generated by addcopyright.py at 04/03/2012 +package org.apache.cloudstack.compute.maas; + +import java.io.IOException; +import java.util.Arrays; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import com.cloud.agent.AgentManager; + +import com.cloud.api.query.dao.HostJoinDao; +import com.cloud.api.query.dao.UserVmJoinDao; +import com.cloud.api.query.vo.HostJoinVO; +import com.cloud.api.query.vo.UserVmJoinVO; +import com.cloud.configuration.Config; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.utils.crypt.DBEncryptionUtil; +import com.cloud.vm.VirtualMachine; +import org.apache.cloudstack.compute.maas.api.ListMaasServiceOfferingsCmd; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; +import org.apache.log4j.Logger; + +import com.cloud.api.query.dao.ServiceOfferingJoinDao; +import com.cloud.api.query.vo.ServiceOfferingJoinVO; +import com.cloud.user.AccountManager; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.SearchCriteria.Op; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; + +public class MaasManagerImpl extends ManagerBase implements MaasManager, Configurable { + + private static class OfferingStats { + int total = 0; + int available = 0; + int erasing = 0; + } + + public static final Logger LOGGER = Logger.getLogger(MaasManagerImpl.class.getName()); + + @Inject private AgentManager _agentMgr; + @Inject private AccountManager accountMgr; + @Inject private DataCenterDao dcDao; + @Inject protected ConfigurationDao configDao; + @Inject private ClusterDetailsDao clusterDetailsDao; + @Inject private ServiceOfferingJoinDao svcOfferingJoinDao; + @Inject private HostJoinDao _hostJoinDao; + @Inject private UserVmJoinDao _userVmJoinDao; + + @Override + public String getConfigComponentName() { + return MaasManager.class.getSimpleName(); + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[] {}; + } + + @Override + public List> getCommands() { + List> cmds = new ArrayList>(); + cmds.add(ListMaasServiceOfferingsCmd.class); + return cmds; + } + + @Override + public List listMaasServiceOfferings(ListMaasServiceOfferingsCmd cmd) throws ConfigurationException, IOException { + List responses = new ArrayList<>(); + + SearchBuilder serviceOfferingJoinVOSearchBuilder = svcOfferingJoinDao.createSearchBuilder(); + serviceOfferingJoinVOSearchBuilder.and("networkOfferingId", serviceOfferingJoinVOSearchBuilder.entity().getDeploymentPlanner(), Op.EQ); + SearchCriteria serviceOfferingJoinVOSearchCriteria = serviceOfferingJoinVOSearchBuilder.create(); + serviceOfferingJoinVOSearchCriteria.addAnd("deploymentPlanner", SearchCriteria.Op.EQ, "BareMetalPlanner"); + List offerings = svcOfferingJoinDao.search(serviceOfferingJoinVOSearchCriteria, null); + + if (offerings == null || offerings.size() == 0) { + return responses; + } + + List bareMetalHosts = new ArrayList<>(); + + if (cmd.getClusterId() != null) { + if (!accountMgr.isNormalUser(CallContext.current().getCallingAccount().getAccountId())) { + bareMetalHosts = getHostJoinVOSByClusterId(cmd); + } + } else { + bareMetalHosts = getHostJoinVOSByZoneId(cmd); + } + + HashMap bareMetalHostsMap = new HashMap<>(); + + for (HostJoinVO host : bareMetalHosts) { + String key = createSpecKey(host.getTag(), host.getCpus(), host.getSpeed().intValue(), (host.getTotalMemory() / 1048576)); + OfferingStats offeringStats = bareMetalHostsMap.get(key); + + if(offeringStats == null) { + offeringStats = new OfferingStats(); + bareMetalHostsMap.put(key, offeringStats); + } + + offeringStats.total++; + + UserVmJoinVO userVm = getVmByHostId(host.getId()); + if(userVm == null) { + offeringStats.available++; + } else if(userVm.getState().equals(VirtualMachine.State.Expunging) || userVm.getState().equals(VirtualMachine.State.Destroyed)) { + offeringStats.erasing++; + } + } + + offerings.forEach(svc -> { + String key = createSpecKey(svc.getHostTag(), svc.getCpu(), svc.getSpeed(), svc.getRamSize()); + + OfferingStats offeringStats = bareMetalHostsMap.get(key); + if(offeringStats == null) { + offeringStats = new OfferingStats(); + } + + MaasServiceOfferingsResponse response = new MaasServiceOfferingsResponse(); + response.setObjectName("maasserviceoffering");; + response.setOfferingId(svc.getUuid()); + response.setOfferingName(svc.getName()); + response.setAvailable(offeringStats.available); + if (accountMgr.isRootAdmin(CallContext.current().getCallingAccount().getAccountId())) { + response.setTotal(offeringStats.total); + response.setErasing(offeringStats.erasing); + } + + responses.add(response); + }); + return responses; + } + + private List getHostJoinVOSByClusterId(ListMaasServiceOfferingsCmd cmd) { + SearchBuilder hostJoinVOSearchBuilder = _hostJoinDao.createSearchBuilder(); + hostJoinVOSearchBuilder.and("hypervisor_type", hostJoinVOSearchBuilder.entity().getHypervisorType(), Op.EQ); + hostJoinVOSearchBuilder.and("cluster_id", hostJoinVOSearchBuilder.entity().getClusterId(), Op.EQ); + SearchCriteria hostJoinVOSearchCriteria = hostJoinVOSearchBuilder.create(); + hostJoinVOSearchCriteria.setParameters("hypervisor_type", "BareMetal"); + hostJoinVOSearchCriteria.setParameters("cluster_id", cmd.getClusterId()); + return _hostJoinDao.search(hostJoinVOSearchCriteria, null); + } + + private List getHostJoinVOSByZoneId(ListMaasServiceOfferingsCmd cmd) { + List zoneIds = new ArrayList<>(); + if(cmd.getZoneId() != null) { + zoneIds.add(cmd.getZoneId()); + } else { + for(DataCenterVO dataCenterVO : dcDao.listAllZones()) { + zoneIds.add(dataCenterVO.getId()); + } + } + + SearchBuilder hostJoinVOSearchBuilder = _hostJoinDao.createSearchBuilder(); + hostJoinVOSearchBuilder.and("hypervisor_type", hostJoinVOSearchBuilder.entity().getHypervisorType(), Op.EQ); + hostJoinVOSearchBuilder.and("data_center_id", hostJoinVOSearchBuilder.entity().getZoneId(), Op.IN); + SearchCriteria hostJoinVOSearchCriteria = hostJoinVOSearchBuilder.create(); + hostJoinVOSearchCriteria.setParameters("hypervisor_type", "BareMetal"); + hostJoinVOSearchCriteria.setParameters("data_center_id", zoneIds.toArray(new Object[zoneIds.size()])); + return _hostJoinDao.search(hostJoinVOSearchCriteria, null); + } + + private UserVmJoinVO getVmByHostId(long hostId) { + SearchBuilder userVmJoinVOSearchBuilder = _userVmJoinDao.createSearchBuilder(); + userVmJoinVOSearchBuilder.and("hypervisor_type", userVmJoinVOSearchBuilder.entity().getHypervisorType(), Op.EQ); + userVmJoinVOSearchBuilder.and().op("host_id", userVmJoinVOSearchBuilder.entity().getHostId(), Op.EQ); + userVmJoinVOSearchBuilder.or("last_host_id", userVmJoinVOSearchBuilder.entity().getLastHostId(), Op.EQ); + userVmJoinVOSearchBuilder.cp(); + SearchCriteria userVmJoinVOSearchCriteria = userVmJoinVOSearchBuilder.create(); + userVmJoinVOSearchCriteria.setParameters("hypervisor_type", "BareMetal"); + userVmJoinVOSearchCriteria.setParameters("host_id", hostId); + userVmJoinVOSearchCriteria.setParameters("last_host_id", hostId); + return _userVmJoinDao.findOneBy(userVmJoinVOSearchCriteria); + } + + @Override + public MaasApiClient getMaasApiClient(long clusterId) throws ConfigurationException { + Map clusterDetails = clusterDetailsDao.findDetails(clusterId); + String maasUrl = clusterDetails.get("baremetalMaasHost"); + String maasApiKey = DBEncryptionUtil.decrypt(clusterDetails.get("baremetalMaaSKey")); + + String[] maasAddress = maasUrl.split(":"); + String maasScheme = null; + String maasIp = null; + Integer maasPort = -1; + + try { + // scheme://ip_or_dns:port + if (maasAddress.length == 3) { + maasScheme = maasAddress[0]; + maasIp = maasAddress[1].replace("/", ""); + maasPort = Integer.parseInt(maasAddress[2].replace("/", "")); + } + + // scheme://ip_or_dns OR ip_or_dns:port + else if (maasAddress.length == 2) { + if (maasAddress[0].equalsIgnoreCase("http") || maasAddress[0].equalsIgnoreCase("https")) { + maasScheme = maasAddress[0]; + maasIp = maasAddress[1].replace("/", ""); + } else { + maasIp = maasAddress[0].replace("/", ""); + maasPort = Integer.parseInt(maasAddress[1].replace("/", "")); + } + } + + // ip_or_dns + else if (maasAddress.length == 1) { + maasIp = maasAddress[0]; + } + + else { + throw new ConfigurationException(maasUrl + " is not a valid URL for MaaS server"); + } + } catch (NumberFormatException e) { + if (maasAddress.length == 3) { + LOGGER.warn(maasAddress[2].replace("/", "") + " is not a valid port number", e); + } else if (maasAddress.length == 2) { + LOGGER.warn(maasAddress[1].replace("/", "") + " is not a valid port number", e); + } + + throw e; + } + + String[] maasSecrets = maasApiKey.split(":"); + + if (maasSecrets.length != 3) { + LOGGER.warn("MaaS API key is malformed"); + throw new ConfigurationException("MaaS API key is malformed"); + } + + String maasConsumerKey = maasSecrets[0]; + String maasKey = maasSecrets[1]; + String maasSercret = maasSecrets[2]; + + int timeout = Integer.parseInt(configDao.getValue(Config.BaremetalProvisionDoneNotificationTimeout.key())); + + return new MaasApiClient(maasScheme, maasIp, maasPort, maasKey, maasSercret, maasConsumerKey, timeout); + } + + private String createSpecKey(String tags, int cpus, int speed, long memory) { + String key = String.format("%s,%s,%s", cpus, speed, memory); + if(tags != null && !tags.isEmpty()) { + String[] tagArray = tags.split(","); + key += String.join(",", + Arrays.stream(tagArray). + filter(tag -> tag.startsWith("bm")). + sorted(Comparator.naturalOrder()). + collect(Collectors.toList())); + } + return key; + } + +} diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasObject.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasObject.java new file mode 100644 index 000000000000..63790566cadc --- /dev/null +++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasObject.java @@ -0,0 +1,342 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.cloudstack.compute.maas; +import com.google.gson.annotations.SerializedName; + +import java.util.List; + +public class MaasObject { + + private static final String ARCH_AMD64 = "amd64"; + private static final String POWER_TYPE_IPMI = "ipmi"; + + enum MaasState { + Ready, Allocated, Deploying, Deployed; + } + + enum InterfaceType { + physical, bond; + } + + public static class MaasConnection { + + public String scheme; + public String ip; + public int port; + public String key; + public String secret; + public String consumerKey; + + public MaasConnection(String scheme, String ip, int port, String key, String secret, String consumerKey) { + this.scheme = scheme; + this.ip = ip; + this.port = port; + this.key = key; + this.secret = secret; + this.consumerKey = consumerKey; + } + + public String getScheme() { + return scheme; + } + + public String getIp() { + return ip; + } + + public int getPort() { + return port; + } + + public String getKey() { + return key; + } + + public String getSecret() { + return secret; + } + + public String getConsumerKey() { + return consumerKey; + } + } + + public class MaasNode { + + public String hostname; + + @SerializedName("power_state") + public String powerState; + + @SerializedName("power_type") + public String powerType; + + @SerializedName("system_id") + public String systemId; + + @SerializedName("status_name") + public String statusName; + + @SerializedName("cpu_count") + public Integer cpuCount; + + @SerializedName("cpu_speed") + public Long cpuSpeed; + + @SerializedName("memory") + public Long memory; + + @SerializedName("storage") + public Double storage; + + @SerializedName("boot_interface") + public MaasInterface bootInterface; + + @SerializedName("interface_set") + public MaasInterface[] interfaceSet; + + public String getSystemId() { + return systemId; + } + + public String getStatusName() { + return statusName; + } + + public Integer getCpuCount() { + return cpuCount; + } + + public Long getCpuSpeed() { + return cpuSpeed; + } + + public Long getMemory() { + return memory; + } + + public Double getStorage() { + return storage; + } + + public MaasInterface getBootInterface() { + return bootInterface; + } + + public MaasInterface[] getInterfaceSet() { + return interfaceSet; + } + } + + public class MaasInterface { + + public int id; + + public String name; + + public String type; + + public MaasLink[] links; + + public boolean enabled; + + @SerializedName("mac_address") + public String macAddress; + } + + public class MaasLink { + public int id; + public String mode; + public MaasSubnet subnet; + } + + public class MaasSubnet { + public int id; + public String name; + public MaasVlan vlan; + } + + public class MaasVlan { + public int id; + + @SerializedName("dhcp_on") + public boolean dhcpOn; + } + + public static class AddMachineParameters { + + @SerializedName("mac_addresses") /* For now only one pxe mac address */ + public String macAddress; + + @SerializedName("power_type") + public String powerType; + + @SerializedName("architecture") + public String arch; + + @SerializedName("power_parameters_power_user") + public String powerUser; + + @SerializedName("power_parameters_power_pass") + public String powerPassword; + + @SerializedName("power_parameters_power_address") + public String powerAddress; + + public String hostname; + + public AddMachineParameters(String powerAddress, String macAddress, String powerUser, String powerPassword, String hostname) { + this.powerAddress = powerAddress; + this.macAddress = macAddress; + this.powerUser = powerUser; + this.powerPassword = powerPassword; + this.hostname = hostname; + this.arch = ARCH_AMD64; + this.powerType = POWER_TYPE_IPMI; + } + + public String getMacAddress() { + return macAddress; + } + + public String getPowerType() { + return powerType; + } + + public String getArch() { + return arch; + } + + public String getPowerUser() { + return powerUser; + } + + public String getPowerPassword() { + return powerPassword; + } + + public String getPowerAddress() { + return powerAddress; + } + + public String getHostname() { + return hostname; + } + } + + public static class DeployMachineParameters{ + + @SerializedName("distro_series") + String distroSeries; + + public DeployMachineParameters(String distroSeries) { + this.distroSeries = distroSeries; + } + + public String getDistroSeries() { + return distroSeries; + } + } + + public static class AllocateMachineParameters { + + @SerializedName("system_id") + String systemId; + + public AllocateMachineParameters(String systemId) { + this.systemId = systemId; + } + + public String getSystemId() { + return systemId; + } + } + + public static class UnlinkSubnetParameters { + Integer id; + + public UnlinkSubnetParameters(Integer id) { + this.id = id; + } + } + + public static class UpdateHostnameParams { + + String hostname; + + public UpdateHostnameParams(String hostname) { + this.hostname = hostname; + } + } + + public static class LinkSubnetParameters { + String mode; + Integer subnet; + + public LinkSubnetParameters(String mode, Integer subnet) { + this.mode = mode; + this.subnet = subnet; + } + } + + public static class ReleaseMachineParameters { + Boolean erase; + + @SerializedName("secure_erase") + Boolean secureErase; + + @SerializedName("quick_erase") + Boolean quickErase; + + public ReleaseMachineParameters(Boolean erase, Boolean secureErase, Boolean quickErase) { + this.erase = erase; + this.secureErase = secureErase; + this.quickErase = quickErase; + } + } + + public static class CreateBondInterfaceParameters { + String name; + + List parents; + + @SerializedName("system_id") + String systemId; + + public CreateBondInterfaceParameters(String name, List parents, String systemId) { + this.name = name; + this.parents = parents; + this.systemId = systemId; + } + } + + public static class RackController { + @SerializedName("system_id") + String systemId; + } + + public static class BootImage { + String name; + } + + public static class ListImagesResponse { + List images; + } +} diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasResourceProvider.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasResourceProvider.java new file mode 100644 index 000000000000..d68eee8c3b6b --- /dev/null +++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasResourceProvider.java @@ -0,0 +1,684 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.cloudstack.compute.maas; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import javax.annotation.PostConstruct; +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.compute.maas.MaasObject.MaasInterface; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.commons.lang3.StringUtils; +import org.apache.log4j.Logger; +import org.springframework.beans.factory.annotation.Configurable; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.PlugNicAnswer; +import com.cloud.agent.api.PlugNicCommand; +import com.cloud.agent.api.ReadyAnswer; +import com.cloud.agent.api.ReadyCommand; +import com.cloud.agent.api.StartAnswer; +import com.cloud.agent.api.StartCommand; +import com.cloud.agent.api.StartupCommand; +import com.cloud.agent.api.StartupRoutingCommand; +import com.cloud.agent.api.UnPlugNicAnswer; +import com.cloud.agent.api.UnPlugNicCommand; +import com.cloud.agent.api.baremetal.DestroyCommand; +import com.cloud.agent.api.to.NicTO; +import com.cloud.agent.api.to.VirtualMachineTO; +import com.cloud.api.query.dao.UserVmJoinDao; +import com.cloud.api.query.vo.UserVmJoinVO; +import com.cloud.baremetal.database.BaremetalRctDao; +import com.cloud.baremetal.database.BaremetalRctVO; +import com.cloud.baremetal.manager.BareMetalResource; +import com.cloud.baremetal.manager.BaremetalManagerImpl; +import com.cloud.baremetal.manager.BaremetalRct; +import com.cloud.baremetal.manager.BaremetalVlanManager; +import com.cloud.baremetal.manager.VlanType; +import com.cloud.baremetal.networkservice.BareMetalResourceBase; +import com.cloud.host.DetailVO; +import com.cloud.host.Host.Type; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.host.dao.HostDetailsDao; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.network.Network; +import com.cloud.network.Networks; +import com.cloud.network.dao.NetworkDao; +import com.cloud.network.dao.NetworkVO; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.OutputInterpreter; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.VMInstanceDao; +import com.google.gson.Gson; + +@Configurable +public class MaasResourceProvider extends BareMetalResourceBase implements BareMetalResource { + + private static final Logger s_logger = Logger.getLogger(MaasResourceProvider.class); + private static final String MAAS_ID_KEY = "MaasSystemId"; + + private String maasUniqueId = null; + private MaasObject.MaasNode maasNode = null; + private MaasApiClient maasApi; + + protected static ConfigurationDao configDao; + protected static VMInstanceDao vmDao; + + private static BaremetalVlanManager vlanMgr; + private static NetworkDao networkDao; + private static HostDao hostDao; + private static VMTemplateDao templateDao; + private static HostDetailsDao hostDetailsDao; + private static MaasManager maasManager; + private static BaremetalRctDao rctDao; + private static AgentManager agentMgr; + private static UserVmJoinDao userVmJoinDao; + + @Inject protected ConfigurationDao _configDao; + @Inject protected VMInstanceDao _vmDao; + + @Inject private BaremetalVlanManager _vlanMgr; + @Inject private NetworkDao _networkDao; + @Inject private HostDao _hostDao; + @Inject private VMTemplateDao _templateDao; + @Inject private HostDetailsDao _hostDetailsDao; + @Inject private MaasManager _maasManager; + @Inject private BaremetalRctDao _rctDao; + @Inject private AgentManager _agentMgr; + @Inject private UserVmJoinDao _userVmJoinDao; + private MaasHostListner hostListner; + + private Gson gson = new Gson(); + + @PostConstruct + void init() { + if (_configDao != null) { + configDao = _configDao; + } + if (_vmDao != null) { + vmDao = _vmDao; + } + if (_vlanMgr != null) { + vlanMgr = _vlanMgr; + } + if (_networkDao != null) { + networkDao = _networkDao; + } + if (_hostDao != null) { + hostDao = _hostDao; + } + if (_templateDao != null) { + templateDao = _templateDao; + } + if (_hostDetailsDao != null) { + hostDetailsDao = _hostDetailsDao; + } + if (_maasManager != null) { + maasManager = _maasManager; + } + if (_rctDao != null) { + rctDao = _rctDao; + } + if (_agentMgr != null) { + agentMgr = _agentMgr; + } + if (_userVmJoinDao != null) { + userVmJoinDao = _userVmJoinDao; + } + } + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + ipmiIface = "lanplus"; + configure(name, params, configDao, vmDao); + + if (params.keySet().size() == 0) { + return true; + } + + // MaaS Import Node + if (ApiConstants.BAREMETAL_MAAS_ACTION_IMPORT.equals((String) params.get(ApiConstants.BAREMETAL_MAAS_ACTION))) { + maasUniqueId = (String) params.get(ApiConstants.BAREMETAL_MAAS_NODE_ID); + + if (maasUniqueId == null) { + throw new ConfigurationException("Unable to get the host unique id"); + } + } + + if (StringUtils.isNotEmpty((String) params.get("MaasSystemId")) && StringUtils.isEmpty(maasUniqueId)) { + maasUniqueId = (String) params.get("MaasSystemId"); + } + + if (configDao == null) { + return true; + } + + maasApi = maasManager.getMaasApiClient(Long.parseLong(_cluster)); + hostListner = new MaasHostListner(this); + agentMgr.registerForHostEvents(hostListner, true, false, true); + + return true; + } + + @Override + public Type getType() { + return com.cloud.host.Host.Type.Routing; + } + + @Override + public StartupCommand[] initialize() { + StartupRoutingCommand cmd = new StartupRoutingCommand(0, 0, 0, 0, null, Hypervisor.HypervisorType.BareMetal, + new HashMap()); + + cmd.setDataCenter(_zone); + cmd.setPod(_pod); + cmd.setCluster(_cluster); + cmd.setGuid(_uuid); + cmd.setName(maasUniqueId); + cmd.setPrivateIpAddress(_ip); + cmd.setStorageIpAddress(_ip); + cmd.setVersion(BareMetalResourceBase.class.getPackage().getImplementationVersion()); + cmd.setCpus((int) _cpuNum); + cmd.setSpeed(_cpuCapacity); + cmd.setMemory(_memCapacity); + cmd.setPrivateMacAddress(_mac); + cmd.setPublicMacAddress(_mac); + return new StartupCommand[] { cmd }; + } + + protected Answer execute(DestroyCommand cmd) { + + try { + maasNode = maasApi.getMaasNode(maasNode.getSystemId()); + assert maasNode != null; + } catch (IOException e) { + throw new CloudRuntimeException("Unable to get MAAS node", e); + } + + try { + VirtualMachineTO vm = cmd.getVm(); + VMInstanceVO vmvo = vmDao.findById(vm.getId()); + vmvo.setHostId(hostId); //hostid is unset, set it here so we don't get NPE downstream + + for (NicTO nic : vm.getNics()) { + Network nw = networkDao.findByUuid(nic.getNetworkUuid()); + if (nw != null) { + int vlan = Integer.parseInt(Networks.BroadcastDomainType.getValue(nw.getBroadcastUri())); + releaseVlan(vlan, nic.isDefaultNic()? VlanType.UNTAGGED: VlanType.TAGGED, false); + } + } + + if (!doScript(_setPxeBootCommand)) { + throw new CloudRuntimeException("Set " + _ip + " boot dev to PXE failed"); + } + + if (!doScript(_powerOffCommand)) { + throw new CloudRuntimeException("Unable to power off " + _ip); + } + + if (BaremetalManagerImpl.pxeVlan.value() != null) { + prepareVlan(BaremetalManagerImpl.pxeVlan.value(), VlanType.UNTAGGED); + } + + UserVmJoinVO uservm = userVmJoinDao.findById(vmvo.getId()); + + maasApi.removeTagFromMachine(maasNode.getSystemId(), "accountid_" + uservm.getAccountUuid()); + maasApi.removeTagFromMachine(maasNode.getSystemId(), "domainid_" + uservm.getDomainUuid()); + + if (StringUtils.isNotEmpty(uservm.getProjectUuid())) { + maasApi.removeTagFromMachine(maasNode.getSystemId(), "projectid_" + uservm.getProjectUuid()); + } + + if (!maasNode.getStatusName().equals(MaasObject.MaasState.Ready.toString())){ + Integer eraseStrategy = BaremetalManagerImpl.diskEraseOnDestroy.value(); + boolean eraseDisk = eraseStrategy == 1 || eraseStrategy == 2; + boolean fullErase = eraseStrategy == 2; + maasApi.releaseMachine(maasNode.getSystemId(), eraseDisk, fullErase); + } + + String hostname = "HOST-" + Long.toString(hostId); + maasApi.updateHostname(maasNode.getSystemId(), hostname); + + } catch (IOException e) { + s_logger.warn("Unable to destroy the node on MAAS " + maasNode.getSystemId(), e); + //TODO: Move the node back to the right VLAN + //TODO: Do we move the node to Broken state? Do we make the status as alert on Cloudstack? + return new Answer(cmd, false, e.getMessage()); + } + + return new Answer(cmd, true, "Success"); + } + + protected StartAnswer execute(StartCommand cmd) { + + VirtualMachineTO vm = cmd.getVirtualMachine(); + VMInstanceVO vmvo = vmDao.findById(vm.getId()); + + if (vmvo == null) { + throw new CloudRuntimeException("Unable to find VM in the DB " + vm.getName()); + } + + OutputInterpreter.AllLinesParser interpreter = new OutputInterpreter.AllLinesParser(); + if (!doScript(_getStatusCommand, interpreter)) { + return new StartAnswer(cmd, "Cannot get current power status of " + getName()); + } + + NicTO defaultNic = getDefaultNic(vm); + if (defaultNic == null) { + throw new CloudRuntimeException("Unable to get the default nic for VM " + vm.getId()); + } + + HostVO host = hostDao.findById(vmvo.getHostId()); + if (host == null) { + throw new CloudRuntimeException("Unable to get the host for VM " + vm.getId()); + } + + //find the switch which is responsible for this mac + Network nw = networkDao.findByUuid(defaultNic.getNetworkUuid()); + if (nw == null) { + throw new CloudRuntimeException("Unable to get the network for VM " + vm.getId() + " With network ID " + defaultNic.getNetworkUuid()); + } + int vlan = Integer.parseInt(Networks.BroadcastDomainType.getValue(nw.getBroadcastUri())); + + try { + maasNode = maasApi.getMaasNode(maasNode.getSystemId()); + assert maasNode != null; + } catch (IOException e) { + throw new CloudRuntimeException("Unable to get info from maas node"); + } + + //if the host is already deployed, just start it + if (vmvo.getLastHostId() != null ) { + if (vmvo.getLastHostId().equals(hostId) && maasNode.getStatusName().equals(MaasObject.MaasState.Deployed.toString())) { + if (!doScript(_bootOrRebootCommand)) { + throw new CloudRuntimeException("IPMI reboot failed for host " + _ip); + } + return new StartAnswer(cmd); + } else { + s_logger.warn("Bad state, VM has lastHostId but MAAS is not in deployed state"); + // XXX: Do something here + return new StartAnswer(cmd, "Unable to start VM because the baremetal is in bad state"); + } + } + + //deploy OS on the host using MAAS + long templateId = vmvo.getTemplateId(); + VMTemplateVO template = templateDao.findById(templateId); + String templateUrl = template.getUrl(); + + assert templateUrl != null; + + checkTemplateOnMaas(templateUrl); + + if (VirtualMachine.State.Starting != vmvo.getState()) { + throw new CloudRuntimeException(String.format("baremetal instance[name:%s, state:%s] is not in state of Starting", vmvo.getInstanceName(), vmvo.getState())); + } + + if (!maasNode.statusName.equals(MaasObject.MaasState.Ready.toString())) { + throw new CloudRuntimeException(String.format("Maas State is not in ready %s %s", vmvo.getInstanceName(), maasNode.systemId)); + } + + try { + + // Before we prepare VLANs, we must be sure that there + // are no other VLANs on the ports just to be safe + if (BaremetalManagerImpl.pxeVlan.value() != null) { + releaseVlan(BaremetalManagerImpl.pxeVlan.value(), VlanType.UNTAGGED, true); + prepareVlan(BaremetalManagerImpl.pxeVlan.value(), VlanType.UNTAGGED); + } + + maasApi.updateHostname(maasNode.getSystemId(), vm.getName()); + setupMaasBonding(maasNode, defaultNic.getMac()); + + MaasObject.AllocateMachineParameters allocateMachineParameters = new MaasObject.AllocateMachineParameters(maasNode.getSystemId()); + maasApi.allocateMachine(allocateMachineParameters); + + UserVmJoinVO uservm = userVmJoinDao.findById(vmvo.getId()); + + maasApi.addTagToMachine(maasNode.getSystemId(), "accountid_" + uservm.getAccountUuid()); + maasApi.addTagToMachine(maasNode.getSystemId(), "domainid_" + uservm.getDomainUuid()); + + if (StringUtils.isNotEmpty(uservm.getProjectUuid())) { + maasApi.addTagToMachine(maasNode.getSystemId(), "projectid_" + uservm.getProjectUuid()); + } + + MaasObject.DeployMachineParameters deployMachineParameters = new MaasObject.DeployMachineParameters(templateUrl); + maasNode = maasApi.deployMachine(maasNode.getSystemId(), deployMachineParameters); + + if (!doScript(_setDiskBootCommand)) { + throw new CloudRuntimeException("Set " + _ip + " boot dev to Disk failed"); + } + + // Before we prepare VLANs, we must to remove + // default PXE VLAN on the ports just to be safe + if (BaremetalManagerImpl.pxeVlan.value() != null) { + releaseVlan(BaremetalManagerImpl.pxeVlan.value(), VlanType.UNTAGGED, false); + } + prepareVlan(vlan, VlanType.UNTAGGED); + + // reboot the host so that it picks up the new config from VR DHCP + if (!doScript(_bootOrRebootCommand)) { + throw new CloudRuntimeException("IPMI reboot failed for host " + _ip); + } + + } catch (Exception e) { + s_logger.error(e.getMessage(), e); + + try { + releaseVlan(vlan, VlanType.UNTAGGED, false); + } catch (Exception ex) { + s_logger.error("Failed cleanup of VLANs ", ex); + } + + try { + maasNode = maasApi.getMaasNode(maasNode.getSystemId()); + Integer eraseStrategy = BaremetalManagerImpl.diskEraseOnDestroy.value(); + boolean eraseDisk = eraseStrategy == 1 || eraseStrategy == 2; + boolean fullErase = eraseStrategy == 2; + maasApi.releaseMachine(maasNode.getSystemId(), eraseDisk, fullErase); + } catch (IOException ex) { + //XXX: put node into alert state, manual intervention required + s_logger.error("Unable to release node " + maasNode.getSystemId(), ex); + } + + doScript(_powerOffCommand); + return new StartAnswer(cmd, e.getMessage()); + } + + vmvo.setState(VirtualMachine.State.Running); + vmvo.setLastHostId(vmvo.getHostId()); + vmDao.update(vmvo.getId(), vmvo); + + s_logger.debug(String.format("received baremetal provision done notification for vm[id:%s name:%s] running on host[mac:%s, ip:%s]", + vm.getId(), vmvo.getInstanceName(), vmvo.getPrivateMacAddress(), vmvo.getPrivateIpAddress())); + + s_logger.debug("Start bare metal vm " + vm.getName() + "successfully"); + _vmName = vm.getName(); + return new StartAnswer(cmd); + } + + private void checkTemplateOnMaas(String templateUrl) { + try { + boolean imgFound = false; + for (MaasObject.BootImage img: maasApi.listImages()) { + if (img.name.contains(templateUrl)) { + imgFound = true; + break; + } + } + + if (!imgFound) { + throw new CloudRuntimeException("Template " + templateUrl + " Not found in MAAS"); + } + } catch (IOException e) { + throw new CloudRuntimeException("Unable to list boot images for MAAS", e); + } + } + + protected ReadyAnswer execute(ReadyCommand cmd) { + return new ReadyAnswer(cmd); + } + + protected PlugNicAnswer execute(PlugNicCommand cmd) { + + NicTO nic = cmd.getNic(); + NetworkVO nw = networkDao.findByUuid(nic.getNetworkUuid()); + int vlan = Integer.parseInt(Networks.BroadcastDomainType.getValue(nw.getBroadcastUri())); + + try { + prepareVlan(vlan, VlanType.TAGGED); + } catch (Exception e) { + String errMesg = "Unable to add Nic " + nic.getUuid() + " to network " + nw.getId(); + s_logger.warn(errMesg, e); + releaseVlan(vlan, VlanType.TAGGED, false); + throw new CloudRuntimeException(errMesg, e); + } + + return new PlugNicAnswer(cmd, true, "Nic " + nic.getUuid() + " Added to network " + nw.getId()); + } + + protected UnPlugNicAnswer execute(UnPlugNicCommand cmd) { + + NicTO nic = cmd.getNic(); + NetworkVO nw = networkDao.findByUuid(nic.getNetworkUuid()); + int vlan = Integer.parseInt(Networks.BroadcastDomainType.getValue(nw.getBroadcastUri())); + + if (nic.isDefaultNic()) { + throw new CloudRuntimeException("Cannot unplug default NIC for baremetal"); + } + + try { + releaseVlan(vlan, VlanType.TAGGED, false); + } catch (Exception e) { + String errMesg = "Unable to add Nic " + nic.getUuid() + " to network " + nw.getId(); + s_logger.warn(errMesg, e); + prepareVlan(vlan, VlanType.TAGGED); + throw new CloudRuntimeException(errMesg, e); + } + + return new UnPlugNicAnswer(cmd, true, "Nic " + nic.getUuid() + " Added to network " + nw.getId()); + } + + @Override + public boolean start() { + if (_zone == null) { + return true; + } + if (configDao == null) { + return true; + } + + // Node Create + if (StringUtils.isEmpty(maasUniqueId)) { + MaasObject.AddMachineParameters maasMachine = new MaasObject.AddMachineParameters(_ip, _mac, _username, _password, _uuid); + + try { + if (hostId == null) { + addMassMachine(maasMachine); + } else { + DetailVO maasNodeId = hostDetailsDao.findDetail(hostId, MAAS_ID_KEY); + if (maasNodeId != null) { + maasNode = maasApi.getMaasNode(maasNodeId.getValue()); + if(maasNode == null) { + maasUniqueId = maasNode.getSystemId(); + addMassMachine(maasMachine); + } + } + } + } catch (IOException e) { + String errMesg = "Error adding machine " + _ip + " Error: " + e.getMessage() + " Check MAAS and remove host if already added and retry again"; + s_logger.warn(errMesg, e); + throw new CloudRuntimeException(errMesg, e); + } + + HostVO host = hostDao.findByGuid(_uuid); + if (host != null) { + updateHostAddedDetails(host.getId()); + } + } + + // Node Import + else { + try { + maasNode = maasApi.getMaasNode(maasUniqueId); + if(maasNode != null) { + maasUniqueId = maasNode.getSystemId(); + _cpuNum = maasNode.getCpuCount(); + _cpuCapacity = maasNode.getCpuSpeed(); + _memCapacity = maasNode.getMemory() * 1024 * 1024; + + MaasInterface minterface = Arrays.asList(maasNode.getInterfaceSet()) + .stream() + .filter(i -> i.type.equals("physical")) + .findFirst() + .get(); + + if (minterface != null) { + _mac = minterface.macAddress; + } + } + } catch (IOException e) { + String errMesg = "Error adding machine " + maasUniqueId + " Error: " + e.getMessage() + " Check MAAS and add the selecte node."; + s_logger.warn(errMesg, e); + throw new CloudRuntimeException(errMesg, e); + } + } + + return true; + } + + private void addMassMachine(MaasObject.AddMachineParameters maasMachine) throws IOException { + if (BaremetalManagerImpl.pxeVlan.value() != null) { + vlanMgr.prepareVlan(BaremetalManagerImpl.pxeVlan.value(), _mac, VlanType.UNTAGGED); + } + + maasNode = maasApi.addMachine(maasMachine); + + //make the default NIC DHCP + MaasObject.MaasInterface bootInterface = maasNode.getBootInterface(); + int interfaceId = bootInterface.id; + int linkId = bootInterface.links[0].id; + int subnetId = bootInterface.links[0].subnet.id; + maasApi.setInterface(maasNode.getSystemId(), interfaceId, linkId, subnetId, true); + + //make sure all the other interfaces are on the same fabric/vlan to enable bonding + for (MaasObject.MaasInterface iface : maasNode.getInterfaceSet()) { + if (!iface.macAddress.equals(bootInterface.macAddress)) { + if (BaremetalManagerImpl.pxeVlan.value() != null) { + vlanMgr.prepareVlan(BaremetalManagerImpl.pxeVlan.value(), iface.macAddress, VlanType.UNTAGGED); + } + Integer lId = null; + if (iface.links != null && iface.links.length > 0) { + lId = iface.links[0].id; + } + maasApi.setInterface(maasNode.getSystemId(), iface.id, lId, subnetId, false); + } + } + + //update maas node + maasNode = maasApi.getMaasNode(maasNode.getSystemId()); + } + + public void updateHostAddedDetails(long hostId) { + if (this.hostId == null) { + this.hostId = hostId; + DetailVO maasIdDetail = new DetailVO(hostId, MAAS_ID_KEY, maasNode.getSystemId()); + hostDetailsDao.persist(maasIdDetail); + } + } + + private NicTO getDefaultNic(VirtualMachineTO vm) { + for (NicTO nic : vm.getNics()) { + if (nic.isDefaultNic()) { + return nic; + } + } + return null; + } + + /** + * Returns all the MACs that are connected to the switch for this host. + * @param node MaasNode + * @return + */ + protected List getAllConnectedMacs(MaasObject.MaasNode node) { + Set rackMacs = new HashSet(); + Set maasMacs = new HashSet(); + + List vos = rctDao.listAll(); + if (vos.isEmpty()) { + throw new CloudRuntimeException("no rack configuration found, please call addBaremetalRct to add one"); + } + + BaremetalRctVO vo = vos.get(0); + BaremetalRct rct = gson.fromJson(vo.getRct(), BaremetalRct.class); + + for (BaremetalRct.Rack rack : rct.getRacks()) { + for (BaremetalRct.HostEntry host : rack.getHosts()) { + rackMacs.add(host.getMac()); + } + } + + for (MaasObject.MaasInterface maasInterface : node.interfaceSet) { + maasMacs.add(maasInterface.macAddress); + } + + maasMacs.retainAll(rackMacs); + return new ArrayList(maasMacs); + } + + protected boolean isConnectedInterface(MaasObject.MaasNode node, String macAddress) { + return getAllConnectedMacs(node).contains(macAddress); + } + + public void setupMaasBonding(MaasObject.MaasNode node, String mac) throws IOException { + MaasObject.MaasInterface bondInterface = null; + List phyInterfaceIds = new ArrayList<>(); + + for (MaasObject.MaasInterface maasInterface: node.interfaceSet) { + if (maasInterface.type.equals(MaasObject.InterfaceType.bond.toString())) { + bondInterface = maasInterface; + } else if (maasInterface.type.equals(MaasObject.InterfaceType.physical.toString()) + && isConnectedInterface(node, maasInterface.macAddress)) { + phyInterfaceIds.add(maasInterface.id); + } + } + + if (bondInterface == null) { + assert phyInterfaceIds.size() >= 2; + bondInterface = maasApi.createBondInterface(node.systemId, phyInterfaceIds); + } + + MaasObject.MaasSubnet dhcpSubnet = maasApi.getDhcpSubnet(); + maasApi.setInterface(node.systemId, bondInterface.id, bondInterface.links[0].id, dhcpSubnet.id, true); + maasApi.updateInterfaceMac(node.systemId, bondInterface.id, mac); + } + + private void releaseVlan(int vlan, VlanType type, boolean releaseAll) { + for (String mac : getAllConnectedMacs(maasNode)) { + if (releaseAll) { + vlanMgr.releaseAllVlan(mac, type); + } else { + vlanMgr.releaseVlan(vlan, mac, type); + } + } + } + + private void prepareVlan(int vlan, VlanType type) { + for (String mac : getAllConnectedMacs(maasNode)) { + vlanMgr.prepareVlan(vlan, mac, type); + } + } +} diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasServiceOfferingsResponse.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasServiceOfferingsResponse.java new file mode 100644 index 000000000000..fbeccd5cb7d0 --- /dev/null +++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/MaasServiceOfferingsResponse.java @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Automatically generated by addcopyright.py at 01/29/2013 +package org.apache.cloudstack.compute.maas; + +import com.google.gson.annotations.SerializedName; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; + +import com.cloud.serializer.Param; + +@EntityReference(value = MaasServiceOfferingsResponse.class) +public class MaasServiceOfferingsResponse extends BaseResponse { + @SerializedName(ApiConstants.BAREMETAL_MAAS_OFFERING_ID) + @Param(description = "service offering id") + private String offeringId; + + @SerializedName(ApiConstants.BAREMETAL_MAAS_OFFERING_NAME) + @Param(description = "TODO") + private String offeringName; + + @SerializedName(ApiConstants.BAREMETAL_MAAS_AVIALBALE_COUNT) + @Param(description = "TODO") + private Integer available; + + @SerializedName(ApiConstants.BAREMETAL_MAAS_TOTAL_COUNT) + @Param(description = "TODO") + private Integer total; + + @SerializedName(ApiConstants.BAREMETAL_MAAS_ERASING_COUNT) + @Param(description = "TODO") + private Integer erasing; + + public String getOfferingId() { + return offeringId; + } + + public void setOfferingId(String offeringId) { + this.offeringId = offeringId; + } + + public String getOfferingName() { + return offeringName; + } + + public void setOfferingName(String offeringName) { + this.offeringName = offeringName; + } + + public Integer getAvailable() { + return available; + } + + public void setAvailable(Integer available) { + this.available = available; + } + + public Integer getTotal() { + return total; + } + + public void setTotal(Integer total) { + this.total = total; + } + + public Integer getErasing() { + return erasing; + } + + public void setErasing(Integer erasing) { + this.erasing = erasing; + } +} diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/api/ListMaasServiceOfferingsCmd.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/api/ListMaasServiceOfferingsCmd.java new file mode 100644 index 000000000000..2474303ed968 --- /dev/null +++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/compute/maas/api/ListMaasServiceOfferingsCmd.java @@ -0,0 +1,99 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Automatically generated by addcopyright.py at 01/29/2013 +// Apache License, Version 2.0 (the "License"); you may not use this +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// Automatically generated by addcopyright.py at 04/03/2012 +package org.apache.cloudstack.compute.maas.api; + +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.BaseCmd.CommandType; +import org.apache.cloudstack.api.response.ClusterResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.compute.maas.MaasServiceOfferingsResponse; +import org.apache.cloudstack.compute.maas.MaasManager; +import org.apache.log4j.Logger; + +@APICommand( + name = "listMaasServiceOfferings", + description = "list baremetal maas service offerings", + responseObject = MaasServiceOfferingsResponse.class, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User} +) +public class ListMaasServiceOfferingsCmd extends BaseListCmd { + private static final Logger LOGGER = Logger.getLogger(ListMaasServiceOfferingsCmd.class); + private static final String NAME = "listmaasserviceofferingsresponse"; + + @Inject + private MaasManager manager; + + // /////////////////////////////////////////////////// + // ////////////// API parameters ///////////////////// + // /////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, description = "The ID of the zone to lists maas service offerings for") + private Long zoneId; + + @Parameter(name = ApiConstants.CLUSTER_ID, type = CommandType.UUID, entityType = ClusterResponse.class, description = "The ID of the cluster to lists maas service offerings for") + private Long clusterId; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + public Long getZoneId() { + return zoneId; + } + public Long getClusterId() { + return clusterId; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + @Override + public void execute() { + try { + List responses = manager.listMaasServiceOfferings(this); + ListResponse response = new ListResponse(); + response.setResponses(responses, responses.size()); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } catch (Exception e) { + LOGGER.debug("Exception happend while executing ListMaasServiceOfferingsCmd", e); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); + } + } + + @Override + public String getCommandName() { + return NAME; + } +} diff --git a/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/baremetal-discoverer/spring-baremetal-discoverer-context.xml b/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/baremetal-discoverer/spring-baremetal-discoverer-context.xml index 12b287714a84..476d2ccdb589 100644 --- a/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/baremetal-discoverer/spring-baremetal-discoverer-context.xml +++ b/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/baremetal-discoverer/spring-baremetal-discoverer-context.xml @@ -27,6 +27,12 @@ http://www.springframework.org/schema/context/spring-context.xsd" > + + + + + + diff --git a/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/core/spring-baremetal-core-context.xml b/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/core/spring-baremetal-core-context.xml index 993fe40458c0..dc6ef492888a 100755 --- a/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/core/spring-baremetal-core-context.xml +++ b/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/core/spring-baremetal-core-context.xml @@ -27,15 +27,21 @@ http://www.springframework.org/schema/context/spring-context.xsd" > + + + + + + diff --git a/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/maas-compute/module.properties b/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/maas-compute/module.properties new file mode 100644 index 000000000000..22aaa235210c --- /dev/null +++ b/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/maas-compute/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=maas-compute +parent=compute diff --git a/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/maas-compute/spring-maas-compute-context.xml b/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/maas-compute/spring-maas-compute-context.xml new file mode 100644 index 000000000000..77c549f39741 --- /dev/null +++ b/plugins/hypervisors/baremetal/src/main/resources/META-INF/cloudstack/maas-compute/spring-maas-compute-context.xml @@ -0,0 +1,30 @@ + + + + diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index d75d03d85a43..b05a61638a8e 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -2319,9 +2319,12 @@ public String configureVPCNetworkUsage(final String privateIpAddress, final Stri } public long[] getVPCNetworkStats(final String privateIP, final String publicIp, final String option) { - final String result = configureVPCNetworkUsage(privateIP, publicIp, option, null); + String result = configureVPCNetworkUsage(privateIP, publicIp, option, null); final long[] stats = new long[2]; if (result != null) { + if (result.contains(",")) { + result = result.split(",")[0]; + } final String[] splitResult = result.split(":"); int i = 0; while (i < splitResult.length - 1) { diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3VirtualRoutingSupport.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3VirtualRoutingSupport.java index a9d673958779..1bcd258874d9 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3VirtualRoutingSupport.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3VirtualRoutingSupport.java @@ -147,6 +147,9 @@ private NetworkUsageAnswer vpcNetworkUsage(NetworkUsageCommand cmd) { if (result == null || result.isEmpty()) { LOGGER.error(" vpc network usage get returns empty "); } + if (result.contains(",")) { + result = result.split(",")[0]; + } long[] stats = new long[2]; if (result != null) { String[] splitResult = result.split(":"); diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 04e9dd4cc969..959ce101e112 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -1133,6 +1133,9 @@ protected NetworkUsageAnswer VPCNetworkUsage(NetworkUsageCommand cmd) { if (result == null || result.isEmpty()) { s_logger.error(" vpc network usage get returns empty "); } + if (result.contains(",")) { + result = result.split(",")[0]; + } long[] stats = new long[2]; if (result != null) { String[] splitResult = result.split(":"); diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java index 49af92cd51d0..ce8d10d94ae4 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java @@ -54,6 +54,8 @@ import org.apache.cloudstack.diagnostics.CopyToSecondaryStorageAnswer; import org.apache.cloudstack.diagnostics.CopyToSecondaryStorageCommand; import org.apache.cloudstack.diagnostics.DiagnosticsService; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.hypervisor.xenserver.ExtraConfigurationUtility; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; @@ -180,12 +182,12 @@ * before you do any changes in this code here. * */ -public abstract class CitrixResourceBase extends ServerResourceBase implements ServerResource, HypervisorResource, VirtualRouterDeployer { +public abstract class CitrixResourceBase extends ServerResourceBase implements ServerResource, HypervisorResource, VirtualRouterDeployer, Configurable { /** * used to describe what type of resource a storage device is of */ public enum SRType { - EXT, ISO, LVM, LVMOHBA, LVMOISCSI, + EXT, ISO, LVM, LVMOHBA, LVMOISCSI, VHDOISCSI, VDILUN, /** * used for resigning metadata (like SR UUID and VDI UUID when a * particular storage manager is installed on a XenServer host (for back-end snapshots to work)) @@ -298,6 +300,13 @@ private static boolean isAlienVm(final VM vm, final Connection conn) throws XenA protected String _configDriveSRName = "ConfigDriveISOs"; public String _attachIsoDeviceNum = "3"; + public static final ConfigKey XenServerManagedStorageSrType = new ConfigKey<>("Advanced", String.class, + "xenserver.managedstorage.srtype", + "lvmoiscsi", + "The type of SR to use when using managed storage for VDI-per-LUN (lvmoiscsi or vdilun)", + true, + ConfigKey.Scope.Zone); + protected XenServerUtilitiesHelper xenServerUtilitiesHelper = new XenServerUtilitiesHelper(); protected int _wait; @@ -1218,7 +1227,7 @@ public VBD createVbd(final Connection conn, final DiskTO volume, final String vm return vbd; } - public VDI createVdi(final SR sr, final String vdiNameLabel, final Long volumeSize) throws Types.XenAPIException, XmlRpcException { + public VDI createVdi(final SR sr, final String vdiNameLabel, final Long volumeSize, Map smConfig) throws Types.XenAPIException, XmlRpcException { final Connection conn = getConnection(); final VDI.Record vdir = new VDI.Record(); @@ -1231,6 +1240,10 @@ public VDI createVdi(final SR sr, final String vdiNameLabel, final Long volumeSi final long unavailableSrSpace = sr.getPhysicalUtilisation(conn); final long availableSrSpace = totalSrSpace - unavailableSrSpace; + if (smConfig != null) { + vdir.smConfig = smConfig; + } + if (availableSrSpace < volumeSize) { throw new CloudRuntimeException("Available space for SR cannot be less than " + volumeSize + "."); } @@ -1240,6 +1253,31 @@ public VDI createVdi(final SR sr, final String vdiNameLabel, final Long volumeSi return VDI.create(conn, vdir); } + public VDI introduceVDI(final SR sr, final String vdiNameLabel, final Long volumeSize, String uuid, String iqn) throws Types.XenAPIException, XmlRpcException { + + final Connection conn = getConnection(); + Map smConfig = new HashMap<>(); + + smConfig.put("targetIQN", iqn); + + if (uuid == null) { + uuid = UUID.randomUUID().toString(); + } + try { + return VDI.introduce(conn, uuid, vdiNameLabel, vdiNameLabel, sr, Types.VdiType.USER, + false, false, new HashMap(), uuid, new HashMap(), + smConfig, false, volumeSize, volumeSize, null, false, new Date(0), null); + } catch (Types.XenAPIException e) { + if (e.shortDescription.contains("VDI could not be found") || ((e instanceof Types.InternalError) && ((Types.InternalError)e).message.contains("Vdi_does_not_exist"))) { + // We could not find a VDI, this can happen when we try to attach a newly created + // We return null here. For all other exceptions, we raise them + return null; + } + s_logger.error("Error introducing VDI " + e.getMessage()); + throw new CloudRuntimeException(e.getMessage()); + } + } + public void createVGPU(final Connection conn, final StartCommand cmd, final VM vm, final GPUDeviceTO gpuDevice) throws XenAPIException, XmlRpcException { } @@ -1369,6 +1407,10 @@ public VM createVmFromTemplate(final Connection conn, final VirtualMachineTO vmS } } + if(vmSpec.getFormat().equals(Storage.ImageFormat.PXEBOOT)) { + vmr.HVMBootParams.put("order", "ndc"); + } + final VM vm = VM.create(conn, vmr); s_logger.debug("Created VM " + vm.getUuid(conn) + " for " + vmSpec.getName()); @@ -1941,6 +1983,7 @@ protected void finalizeVmMetaData(final VM vm, final VM.Record vmr, final Connec final Map platform = com.cloud.utils.StringUtils.stringToMap(platformstring); syncPlatformAndCoresPerSocketSettings(coresPerSocket, platform); vm.setPlatform(conn, platform); + calculateCorePerSocket(vm, conn, vmSpec); } else { final String timeoffset = details.get(VmDetailConstants.TIME_OFFSET); if (timeoffset != null) { @@ -1948,9 +1991,13 @@ protected void finalizeVmMetaData(final VM vm, final VM.Record vmr, final Connec platform.put(VmDetailConstants.TIME_OFFSET, timeoffset); vm.setPlatform(conn, platform); } - if (coresPerSocket != null) { + + calculateCorePerSocket(vm, conn, vmSpec); + + final String nestedHvm = details.get("nested.hvm"); + if (nestedHvm != null) { final Map platform = vm.getPlatform(conn); - syncPlatformAndCoresPerSocketSettings(coresPerSocket, platform); + platform.put("exp-nested-hvm", nestedHvm); vm.setPlatform(conn, platform); } } @@ -1986,6 +2033,18 @@ protected void setVmBootDetails(final VM vm, final Connection conn, String bootT vm.setPlatform(conn, platform); } + private void calculateCorePerSocket(final VM vm, final Connection conn, final VirtualMachineTO vmSpec) throws XmlRpcException, XenAPIException { + int coresPerSocketCalculated; + int cpus = vmSpec.getCpus(); + if (cpus % 2 == 1) { + coresPerSocketCalculated = 1; + } else { + coresPerSocketCalculated = cpus / 2; + } + final Map platform = vm.getPlatform(conn); + platform.put("cores-per-socket", Integer.toString(coresPerSocketCalculated)); + vm.setPlatform(conn, platform); + } /** * This method just creates a XenServer network following the tunnel network * naming convention @@ -2497,6 +2556,61 @@ public SR getIscsiSR(final Connection conn, final String srNameLabel, final Stri } } + public SR getVdiLunSr(Connection conn, String storageHost) { + try { + final Map deviceConfig = new HashMap(); + final Set srs = SR.getAll(conn); + for (final SR sr : srs) { + if (!(SRType.VDILUN.equals(sr.getType(conn)))) { + continue; + } + final Set pbds = sr.getPBDs(conn); + if (pbds.isEmpty()) { + continue; + } + + final PBD pbd = pbds.iterator().next(); + final Map dc = pbd.getDeviceConfig(conn); + if (dc == null) { + continue; + } + if (dc.get("target") == null) { + continue; + } + + if (storageHost.equals(dc.get("target"))) { + return sr; + } + } + + // came here, could not find an SR, create one + deviceConfig.put("target", storageHost); + String srNameLabel = "Cloudstack-VDILUN-SR-" + storageHost; + final Host host = Host.getByUuid(conn, _host.getUuid()); + return SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, SRType.VDILUN.toString(), + "user", true, new HashMap()); + } catch (Exception e) { + String mesg = "Unable to find/create VDILUN SR due to: " + e.getMessage(); + s_logger.warn(mesg); + throw new CloudRuntimeException(mesg); + } + } + + public String getTargetIqn(String iqnPath) { + if (iqnPath.endsWith("/")) { + iqnPath = iqnPath.substring(0, iqnPath.length() - 1); + } + + final String tmp[] = iqnPath.split("/"); + if (tmp.length != 3) { + final String msg = "Wrong iscsi path " + iqnPath + " it should be /targetIQN/LUN"; + s_logger.warn(msg); + throw new CloudRuntimeException(msg); + } + final String targetiqn = tmp[1].trim(); + return targetiqn; + } + private SR introduceAndPlugIscsiSr(Connection conn, String pooluuid, String srNameLabel, String type, Map smConfig, Map deviceConfig, boolean ignoreIntroduceException) throws XmlRpcException, XenAPIException { SR sr = null; @@ -3586,10 +3700,24 @@ protected String getXMLNodeValue(final Node n) { return n.getChildNodes().item(0).getNodeValue(); } - public void handleSrAndVdiDetach(final String iqn, final Connection conn) throws Exception { - final SR sr = getStorageRepository(conn, iqn); + public void handleManagedSrAndVdiDetach(final String iqn, final String storageHost, final Connection conn) throws Exception { + SR sr = null; + if (SRType.VDILUN.equals(XenServerManagedStorageSrType.value())) { + sr = getVdiLunSr(conn, storageHost); + String targetIqn = getTargetIqn(iqn); + VDI vdi = getVDIbyLocationandSR(conn, targetIqn, sr); + if (vdi != null){ + vdi.forget(conn); + } + + } else { + sr = getStorageRepository(conn, iqn); + removeSR(conn, sr); + } + } + + public void handleManagedSrRemove() { - removeSR(conn, sr); } protected void destroyUnattachedVBD(Connection conn, VM vm) { @@ -4234,6 +4362,14 @@ public VDI prepareManagedDisk(final Connection conn, final DiskTO disk, final lo return null; } + // TODO for VDILUN sr, we need to first find the SR by the target IP (not by IQN) + // then if such an SR exists, we have to look at its sm_config map to see if + // a VDI exists which matches the given IQN. If we find such a VDI, we return it, + // else, we *introduce* that VDI into the SR, this will ensure that the data on + // the LUN is not zeroed out (VDI create does that). Now there is a caveat, if the + // volume is cloned, we need to introduce it, if it is a fresh volume, we need to + // create it (as the LUN will not have the VDI inside it yet) + final String iqn = details.get(DiskTO.IQN); final Set srNameLabels = SR.getByNameLabel(conn, iqn); @@ -4244,7 +4380,7 @@ public VDI prepareManagedDisk(final Connection conn, final DiskTO disk, final lo final String vdiNameLabel = Volume.Type.ROOT.equals(disk.getType()) ? ("ROOT-" + vmId) : (vmName + "-DATA"); - return prepareManagedStorage(conn, details, null, vdiNameLabel); + return prepareManagedStorage(conn, details, disk.getPath(), vdiNameLabel); } protected SR prepareManagedSr(final Connection conn, final Map details) { @@ -4261,6 +4397,8 @@ protected SR prepareManagedSr(final Connection conn, final Map d final String volumedesc = storageHost + ":" + mountpoint; return getNfsSR(conn, poolid, namelable, storageHost, mountpoint, volumedesc); + } else if (SRType.VDILUN.equals(XenServerManagedStorageSrType.value())) { + return getVdiLunSr(conn, storageHost); } else { return getIscsiSR(conn, iScsiName, storageHost, iScsiName, chapInitiatorUsername, chapInitiatorSecret, false, SRType.LVMOISCSI.toString(), true); } @@ -4271,16 +4409,28 @@ protected VDI prepareManagedStorage(final Connection conn, final Map smConfig = new HashMap<>(); + String iqn = getTargetIqn(details.get(DiskTO.IQN)); + smConfig.put("targetIQN", iqn); Set vdisInSr = sr.getVDIs(conn); - // If a VDI already exists in the SR (in case we cloned from a template cache), use that. - if (vdisInSr.size() == 1) { - vdi = vdisInSr.iterator().next(); + if (SRType.VDILUN.equals(XenServerManagedStorageSrType.value())) { + + vdi = getVDIbyLocationandSR(conn, iqn, sr); + if (vdi == null) { + vdi = introduceVDI(sr, vdiNameLabel, volumeSize, path, iqn); + } + + } else { + // If a VDI already exists in the SR (in case we cloned from a template cache), use that. + if (vdisInSr.size() == 1) { + vdi = vdisInSr.iterator().next(); + } } if (vdi == null) { - vdi = createVdi(sr, vdiNameLabel, volumeSize); + vdi = createVdi(sr, vdiNameLabel, volumeSize, smConfig); } else { // If vdi is not null, it must have already been created, so check whether a resize of the volume was performed. // If true, resize the VDI to the volume size. @@ -4707,6 +4857,14 @@ public void scaleVM(final Connection conn, final VM vm, final VirtualMachineTO v // vm.addToVCPUsParamsLive(conn, "weight", // Integer.toString(cpuWeight)); callHostPlugin(conn, "vmops", "add_to_VCPUs_params_live", "key", "weight", "value", Integer.toString(cpuWeight), "vmname", vmSpec.getName()); + + // Recalculating cores per socket + final VM.Record vmr = vm.getRecord(conn); + try { + finalizeVmMetaData(vm, vmr, conn, vmSpec); + } catch (final Exception e) { + throw new CloudRuntimeException("Unable to finalize VM MetaData: " + vmSpec); + } } } @@ -5750,4 +5908,12 @@ private void umountNfs(Connection conn, String remoteDir, String localDir) { s_logger.warn(errMsg); } } + + public ConfigKey[] getConfigKeys(){ + return new ConfigKey[] {XenServerManagedStorageSrType}; + } + + public String getConfigComponentName(){ + return CitrixResourceBase.class.getSimpleName(); + } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java index 7c289de80c75..0640c6a88208 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java @@ -75,6 +75,7 @@ import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.resource.StorageProcessor; +import com.cloud.utils.NumbersUtil; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.storage.S3.ClientOptions; import com.google.common.annotations.VisibleForTesting; @@ -175,6 +176,14 @@ public ResignatureAnswer resignature(final ResignatureCommand cmd) { final String storageHost = details.get(DiskTO.STORAGE_HOST); final String chapInitiatorUsername = details.get(DiskTO.CHAP_INITIATOR_USERNAME); final String chapInitiatorSecret = details.get(DiskTO.CHAP_INITIATOR_SECRET); + final ResignatureAnswer resignatureAnswer = new ResignatureAnswer(); + + if (SRType.VDILUN.equals(CitrixResourceBase.XenServerManagedStorageSrType.value())) { + resignatureAnswer.setSize(NumbersUtil.parseLong(details.get(DiskTO.VOLUME_SIZE), 0)); + resignatureAnswer.setPath(details.get(DiskTO.PATH)); + resignatureAnswer.setFormat(ImageFormat.VHD); + return resignatureAnswer; + } newSr = hypervisorResource.getIscsiSR(conn, iScsiName, storageHost, iScsiName, chapInitiatorUsername, chapInitiatorSecret, true, false); @@ -186,8 +195,6 @@ public ResignatureAnswer resignature(final ResignatureCommand cmd) { VDI vdi = vdis.iterator().next(); - final ResignatureAnswer resignatureAnswer = new ResignatureAnswer(); - resignatureAnswer.setSize(vdi.getVirtualSize(conn)); resignatureAnswer.setPath(vdi.getUuid(conn)); resignatureAnswer.setFormat(ImageFormat.VHD); @@ -496,7 +503,11 @@ public Answer dettachVolume(final DettachCommand cmd) { } if (cmd.isManaged()) { - hypervisorResource.handleSrAndVdiDetach(cmd.get_iScsiName(), conn); + + final PrimaryDataStoreTO store = (PrimaryDataStoreTO) data.getDataStore(); + String storageHost = store.getHost(); + + hypervisorResource.handleManagedSrAndVdiDetach(cmd.get_iScsiName(), storageHost, conn); } return new DettachAnswer(disk); @@ -506,11 +517,14 @@ public Answer dettachVolume(final DettachCommand cmd) { } } - protected VDI createVdi(final Connection conn, final String vdiName, final SR sr, final long size) throws BadServerResponse, XenAPIException, XmlRpcException { + protected VDI createVdi(final Connection conn, final String vdiName, final SR sr, final long size, Map smConfig) throws BadServerResponse, XenAPIException, XmlRpcException { final VDI.Record vdir = new VDI.Record(); vdir.nameLabel = vdiName; vdir.SR = sr; vdir.type = Types.VdiType.USER; + if (smConfig != null) { + vdir.smConfig = smConfig; + } vdir.virtualSize = size; final VDI vdi = VDI.create(conn, vdir); @@ -601,7 +615,7 @@ public Answer deleteVolume(final DeleteCommand cmd) { } protected boolean IsISCSI(final String type) { - return SRType.LVMOHBA.equals(type) || SRType.LVMOISCSI.equals(type) || SRType.LVM.equals(type); + return SRType.LVMOHBA.equals(type) || SRType.LVMOISCSI.equals(type) || SRType.LVM.equals(type) || SRType.VDILUN.equals(type); } private String copy_vhd_from_secondarystorage(final Connection conn, final String mountpoint, final String sruuid, final int wait) { diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java index 68236f92ac44..f6a3de30db30 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java @@ -259,6 +259,7 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { final Connection conn = hypervisorResource.getConnection(); SR srcSr = null; SR destSr = null; + VDI destVdi = null; boolean removeSrAfterCopy = false; Task task = null; @@ -329,6 +330,14 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { details.put(DiskTO.CHAP_INITIATOR_SECRET, chapInitiatorSecret); destSr = hypervisorResource.prepareManagedSr(conn, details); + if (CitrixResourceBase.SRType.VDILUN.equals(CitrixResourceBase.XenServerManagedStorageSrType.value())) { + // we create a destination VDI as the SR is just a placeholder for LUNs + Map smConfig = new HashMap<>(); + //TODO: Auth + smConfig.put("targetIQN", hypervisorResource.getTargetIqn(managedStoragePoolName)); + destVdi = createVdi(conn, managedStoragePoolRootVolumeName, destSr, + Long.parseLong(managedStoragePoolRootVolumeSize), smConfig); + } } else { final String srName = CitrixHelper.getSRNameLabel(destStore.getUuid(), destStore.getPoolType(), destStore.getPath()); final Set srs = SR.getByNameLabel(conn, srName); @@ -344,7 +353,7 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { } } - task = srcVdi.copyAsync(conn, destSr, null, null); + task = srcVdi.copyAsync(conn, destSr, null, destVdi); // poll every 1 seconds , hypervisorResource.waitForTask(conn, task, 1000, wait * 1000); @@ -409,6 +418,14 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { } if (removeSrAfterCopy && destSr != null) { + if (CitrixResourceBase.SRType.VDILUN.equals(CitrixResourceBase.XenServerManagedStorageSrType.value()) && + destVdi != null) { + try { + destVdi.dbForget(conn); + } catch (XenAPIException | XmlRpcException e) { + s_logger.warn(e); + } + } hypervisorResource.removeSR(conn, destSr); } } @@ -524,6 +541,8 @@ public Answer backupSnapshot(final CopyCommand cmd) { String snapshotBackupUuid = null; boolean fullbackup = Boolean.parseBoolean(options.get("fullSnapshot")); Long physicalSize = null; + VDI srcVdi = null; + try { SR primaryStorageSR = null; @@ -536,11 +555,27 @@ public Answer backupSnapshot(final CopyCommand cmd) { final String storageHost = srcDetails.get(DiskTO.STORAGE_HOST); final String chapInitiatorUsername = srcDetails.get(DiskTO.CHAP_INITIATOR_USERNAME); final String chapInitiatorSecret = srcDetails.get(DiskTO.CHAP_INITIATOR_SECRET); - final String srType = CitrixResourceBase.SRType.LVMOISCSI.toString(); + final String srType = CitrixResourceBase.XenServerManagedStorageSrType.value(); + + if (CitrixResourceBase.SRType.VDILUN.equals(srType)){ + //introduce the IQN VDI + String targetIqn = hypervisorResource.getTargetIqn(iScsiName); + primaryStorageSR = hypervisorResource.getVdiLunSr(conn, storageHost); + srcVdi = hypervisorResource.getVDIbyLocationandSR(conn, targetIqn, primaryStorageSR); + + if (srcVdi == null) { + String tempUuid = UUID.randomUUID().toString(); + srcVdi = hypervisorResource.introduceVDI(primaryStorageSR, snapshotTO.getName(), snapshotTO.getPhysicalSize(), + tempUuid, targetIqn); + } - primaryStorageSR = hypervisorResource.getIscsiSR(conn, iScsiName, storageHost, iScsiName, chapInitiatorUsername, chapInitiatorSecret, false, srType, true); + } else { + + primaryStorageSR = hypervisorResource.getIscsiSR(conn, iScsiName, storageHost, iScsiName, + chapInitiatorUsername, chapInitiatorSecret, false, srType, true); + srcVdi = primaryStorageSR.getVDIs(conn).iterator().next(); + } - final VDI srcVdi = primaryStorageSR.getVDIs(conn).iterator().next(); if (srcVdi == null) { throw new InternalErrorException("Could not Find a VDI on the SR: " + primaryStorageSR.getNameLabel(conn)); } @@ -565,6 +600,9 @@ public Answer backupSnapshot(final CopyCommand cmd) { final String folder = destPath; String finalPath = null; + // make sure if secondary storage is capable of doing partial backup or not + fullbackup = fullbackup || !destStore.isPartialBackupCapable(); + final String localMountPoint = BaseMountPointOnHost + File.separator + UUID.nameUUIDFromBytes(secondaryStorageUrl.getBytes()).toString(); if (fullbackup) { SR snapshotSr = null; @@ -636,7 +674,12 @@ public Answer backupSnapshot(final CopyCommand cmd) { } if (primaryStore.isManaged()) { - hypervisorResource.removeSR(conn, primaryStorageSR); + if (CitrixResourceBase.SRType.VDILUN.equals(CitrixResourceBase.XenServerManagedStorageSrType.value()) && + srcVdi != null) { + srcVdi.forget(conn); + } else { + hypervisorResource.removeSR(conn, primaryStorageSR); + } } } } else { @@ -823,8 +866,10 @@ public Answer createVolumeFromSnapshot(final CopyCommand cmd) { } SR srcSr = null; VDI destVdi = null; + Map smConfig = null; SR primaryStorageSR = null; + final String srType = CitrixResourceBase.XenServerManagedStorageSrType.value(); try { if (pool.isManaged()) { @@ -834,9 +879,18 @@ public Answer createVolumeFromSnapshot(final CopyCommand cmd) { final String storageHost = destDetails.get(DiskTO.STORAGE_HOST); final String chapInitiatorUsername = destDetails.get(DiskTO.CHAP_INITIATOR_USERNAME); final String chapInitiatorSecret = destDetails.get(DiskTO.CHAP_INITIATOR_SECRET); - final String srType = CitrixResourceBase.SRType.LVMOISCSI.toString(); - primaryStorageSR = hypervisorResource.getIscsiSR(conn, iScsiName, storageHost, iScsiName, chapInitiatorUsername, chapInitiatorSecret, false, srType, true); + + if (CitrixResourceBase.SRType.VDILUN.equals(srType)) { + + primaryStorageSR = hypervisorResource.getVdiLunSr(conn, storageHost); + smConfig = new HashMap<>(); + smConfig.put("targetIQN", hypervisorResource.getTargetIqn(iScsiName)); + + } else { + primaryStorageSR = hypervisorResource.getIscsiSR(conn, iScsiName, storageHost, iScsiName, + chapInitiatorUsername, chapInitiatorSecret, false, srType, true); + } } else { primaryStorageSR = hypervisorResource.getSRByNameLabelandHost(conn, primaryStorageNameLabel); @@ -847,7 +901,7 @@ public Answer createVolumeFromSnapshot(final CopyCommand cmd) { } final String nameLabel = "cloud-" + UUID.randomUUID().toString(); - destVdi = createVdi(conn, nameLabel, primaryStorageSR, volume.getSize()); + destVdi = createVdi(conn, nameLabel, primaryStorageSR, volume.getSize(), smConfig); volumeUUID = destVdi.getUuid(conn); final String snapshotInstallPath = snapshot.getPath(); final int index = snapshotInstallPath.lastIndexOf(File.separator); @@ -899,7 +953,17 @@ public Answer createVolumeFromSnapshot(final CopyCommand cmd) { } if (pool.isManaged()) { - hypervisorResource.removeSR(conn, primaryStorageSR); + if (CitrixResourceBase.SRType.VDILUN.equals(srType)) { + if (destVdi != null) { + try { + destVdi.forget(conn); + } catch (Exception e) { + s_logger.warn("Error removing vdi after copy " + e.getMessage()); + } + } + } else { + hypervisorResource.removeSR(conn, primaryStorageSR); + } } if (!result && destVdi != null) { @@ -1059,7 +1123,7 @@ public Answer createTemplateFromSnapshot(final CopyCommand cmd) { final DataTO destData = cmd.getDestTO(); if (srcData.getDataStore() instanceof PrimaryDataStoreTO && destData.getDataStore() instanceof NfsTO) { - return createTemplateFromSnapshot2(cmd); + return createTemplateFromSnapshotManagedStorage(cmd); } final int wait = cmd.getWait(); @@ -1129,7 +1193,7 @@ public Answer createTemplateFromSnapshot(final CopyCommand cmd) { final long templateVirtualSize = snapshotChains.get(0).getVirtualSize(conn); - destVdi = createVdi(conn, nameLabel, destSr, templateVirtualSize); + destVdi = createVdi(conn, nameLabel, destSr, templateVirtualSize, null); final String destVdiUuid = destVdi.getUuid(conn); @@ -1198,7 +1262,7 @@ public Answer createTemplateFromSnapshot(final CopyCommand cmd) { } } - private Answer createTemplateFromSnapshot2(final CopyCommand cmd) { + public Answer createTemplateFromSnapshotManagedStorage(final CopyCommand cmd) { final Connection conn = hypervisorResource.getConnection(); final SnapshotObjectTO snapshotObjTO = (SnapshotObjectTO)cmd.getSrcTO(); @@ -1227,6 +1291,8 @@ private Answer createTemplateFromSnapshot2(final CopyCommand cmd) { VDI destVdi = null; boolean result = false; + String srType = CitrixResourceBase.XenServerManagedStorageSrType.value(); + VDI srcVdi = null; try { final Map srcDetails = cmd.getOptions(); @@ -1235,11 +1301,23 @@ private Answer createTemplateFromSnapshot2(final CopyCommand cmd) { final String storageHost = srcDetails.get(DiskTO.STORAGE_HOST); final String chapInitiatorUsername = srcDetails.get(DiskTO.CHAP_INITIATOR_USERNAME); final String chapInitiatorSecret = srcDetails.get(DiskTO.CHAP_INITIATOR_SECRET); - String srType; - srType = CitrixResourceBase.SRType.LVMOISCSI.toString(); + if (CitrixResourceBase.SRType.VDILUN.equals(srType)) { + String iqn = hypervisorResource.getTargetIqn(iScsiName); + srcSr = hypervisorResource.getVdiLunSr(conn, storageHost); + srcVdi = hypervisorResource.getVDIbyLocationandSR(conn, iqn, srcSr); - srcSr = hypervisorResource.getIscsiSR(conn, iScsiName, storageHost, iScsiName, chapInitiatorUsername, chapInitiatorSecret, false, srType, true); + if (srcVdi == null) { + String tempUuid = UUID.randomUUID().toString(); + srcVdi = hypervisorResource.introduceVDI(srcSr, snapshotObjTO.getName(), snapshotObjTO.getPhysicalSize(), + tempUuid, iqn); + } + + } else { + srcSr = hypervisorResource.getIscsiSR(conn, iScsiName, storageHost, iScsiName, chapInitiatorUsername, chapInitiatorSecret, false, srType, true); + // there should only be one VDI in this SR + srcVdi = srcSr.getVDIs(conn).iterator().next(); + } final String destNfsPath = destUri.getHost() + ":" + destUri.getPath(); final String localDir = BASE_MOUNT_POINT_ON_REMOTE + UUID.nameUUIDFromBytes(destNfsPath.getBytes()); @@ -1250,8 +1328,6 @@ private Answer createTemplateFromSnapshot2(final CopyCommand cmd) { destSr = createFileSR(conn, localDir + "/" + destDir); - // there should only be one VDI in this SR - final VDI srcVdi = srcSr.getVDIs(conn).iterator().next(); destVdi = srcVdi.copy(conn, destSr); @@ -1314,7 +1390,17 @@ private Answer createTemplateFromSnapshot2(final CopyCommand cmd) { } if (srcSr != null) { - hypervisorResource.removeSR(conn, srcSr); + if (CitrixResourceBase.SRType.VDILUN.equals(srType)) { + if (srcVdi != null) { + try { + srcVdi.forget(conn); + } catch (Exception e) { + s_logger.warn("Error cleaning srcVdi for src snapshot " + snapshotObjTO.getId()); + } + } + } else { + hypervisorResource.removeSR(conn, srcSr); + } } if (destSr != null) { diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56NetworkUsageCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56NetworkUsageCommandWrapper.java index ad414a4ea321..dff8271d805b 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56NetworkUsageCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56NetworkUsageCommandWrapper.java @@ -79,13 +79,17 @@ protected NetworkUsageAnswer executeNetworkUsage(final NetworkUsageCommand comma } final ExecutionResult result = xenServer56.executeInVR(command.getPrivateIP(), "vpc_netusage.sh", args); - final String detail = result.getDetails(); + String detail = result.getDetails(); if (!result.isSuccess()) { throw new Exception(" vpc network usage plugin call failed "); } + if (option.equals("get") || option.equals("vpn")) { final long[] stats = new long[2]; if (detail != null) { + if (detail.contains(",")) { + detail = detail.split(",")[0]; + } final String[] splitResult = detail.split(":"); int i = 0; while (i < splitResult.length - 1) { diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java index 766335812390..9f2cdccb84f2 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java @@ -64,12 +64,18 @@ public Answer execute(final DeleteStoragePoolCommand command, final CitrixResour return answer; } catch (final Exception e) { - final String msg = "DeleteStoragePoolCommand XenAPIException:" + e.getMessage() + " host:" + citrixResourceBase.getHost().getUuid() + - " pool: " + poolTO.getHost() + poolTO.getPath(); + // if error is "Can not see storage pool" return "success" it most + // probably has been already removed, otherwise throw an actual error. + if (e.getMessage().contains("Can not see storage pool")) { + return new Answer(command, true, "success"); + } else { + final String msg = "DeleteStoragePoolCommand XenAPIException:" + e.getMessage() + " host:" + citrixResourceBase.getHost().getUuid() + + " pool: " + poolTO.getHost() + poolTO.getPath(); - s_logger.error(msg, e); + s_logger.error(msg, e); - return new Answer(command, false, msg); + return new Answer(command, false, msg); + } } } } \ No newline at end of file diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixModifyStoragePoolCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixModifyStoragePoolCommandWrapper.java index 888aa81d3746..83e3116472b1 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixModifyStoragePoolCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixModifyStoragePoolCommandWrapper.java @@ -48,12 +48,25 @@ public Answer execute(final ModifyStoragePoolCommand command, final CitrixResour final Connection conn = citrixResourceBase.getConnection(); final StorageFilerTO pool = command.getPool(); final boolean add = command.getAdd(); + final Map tInfo = new HashMap(); + + if (add) { try { String srName = command.getStoragePath(); if (srName == null) { srName = CitrixHelper.getSRNameLabel(pool.getUuid(), pool.getType(), pool.getPath()); } + + if(CitrixResourceBase.SRType.VDILUN.equals(CitrixResourceBase.XenServerManagedStorageSrType.value()) && + pool.isManaged()){ + + final SR sr = citrixResourceBase.getVdiLunSr(conn, pool.getHost()); + long capacity = sr.getPhysicalSize(conn); // TODO handle this gracefully + + return new ModifyStoragePoolAnswer(command, capacity, capacity, tInfo); + } + final SR sr = citrixResourceBase.getStorageRepository(conn, srName); citrixResourceBase.setupHeartbeatSr(conn, sr, false); final long capacity = sr.getPhysicalSize(conn); @@ -63,7 +76,6 @@ public Answer execute(final ModifyStoragePoolCommand command, final CitrixResour s_logger.warn(msg); return new Answer(command, false, msg); } - final Map tInfo = new HashMap(); final ModifyStoragePoolAnswer answer = new ModifyStoragePoolAnswer(command, capacity, available, tInfo); return answer; } catch (final XenAPIException e) { diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixResizeVolumeCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixResizeVolumeCommandWrapper.java index e7505cc2f34e..711ee88ae24c 100755 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixResizeVolumeCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixResizeVolumeCommandWrapper.java @@ -59,9 +59,11 @@ public Answer execute(final ResizeVolumeCommand command, final CitrixResourceBas resizeSr(conn, command); } - VDI vdi = citrixResourceBase.getVDIbyUuid(conn, volId); + VDI vdi = citrixResourceBase.getVDIbyUuid(conn, volId, false); - vdi.resize(conn, newSize); + if (vdi != null) { + vdi.resize(conn, newSize); + } return new ResizeVolumeAnswer(command, true, "success", newSize); } catch (Exception ex) { diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStopCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStopCommandWrapper.java index 1a74ff4385bf..bf9867cd05fc 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStopCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStopCommandWrapper.java @@ -43,6 +43,8 @@ import com.xensource.xenapi.SR; import com.xensource.xenapi.Types.VmPowerState; import com.xensource.xenapi.Types.XenAPIException; +import com.xensource.xenapi.VBD; +import com.xensource.xenapi.VDI; import com.xensource.xenapi.VGPU; import com.xensource.xenapi.VIF; import com.xensource.xenapi.VM; @@ -138,6 +140,19 @@ public Answer execute(final StopCommand command, final CitrixResourceBase citrix command.setGpuDevice(new GPUDeviceTO(null, null, groupDetails)); } + if (CitrixResourceBase.SRType.VDILUN.equals(CitrixResourceBase.XenServerManagedStorageSrType.value())) { + Set vbds = vm.getVBDs(conn); + for (VBD vbd : vbds) { + VDI vdi = vbd.getVDI(conn); + if (!vdi.isNull()) { + SR sr = vdi.getSR(conn); + if (sr.getType(conn).equals(CitrixResourceBase.SRType.VDILUN.toString())) { + vdi.forget(conn); + } + } + } + } + final Set vifs = vm.getVIFs(conn); final List networks = new ArrayList(); for (final VIF vif : vifs) { diff --git a/plugins/network-elements/nuage-vsp/pom.xml b/plugins/network-elements/nuage-vsp/pom.xml new file mode 100644 index 000000000000..a54414c233a4 --- /dev/null +++ b/plugins/network-elements/nuage-vsp/pom.xml @@ -0,0 +1,46 @@ + + + 4.0.0 + cloud-plugin-network-vsp + Apache CloudStack Plugin - Nuage VSP + + org.apache.cloudstack + cloudstack-plugins + 4.13.2.0-SNAPSHOT + ../../pom.xml + + + + nuage-vsp + http://cs.mv.nuagenetworks.net/releases/ + + + + 1.0.8 + + + + net.nuagenetworks.vsp + nuage-vsp-acs-client + ${nuage.vsp.client.version} + + + diff --git a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightElement.java b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightElement.java index 9bae4bd19e6f..658e691d19e5 100644 --- a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightElement.java +++ b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightElement.java @@ -19,19 +19,6 @@ package org.apache.cloudstack.network.opendaylight; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - -import org.apache.cloudstack.network.opendaylight.agent.commands.StartupOpenDaylightControllerCommand; - import com.cloud.agent.api.StartupCommand; import com.cloud.deploy.DeployDestination; import com.cloud.exception.ConcurrentOperationException; @@ -55,6 +42,16 @@ import com.cloud.vm.NicProfile; import com.cloud.vm.ReservationContext; import com.cloud.vm.VirtualMachineProfile; +import org.apache.cloudstack.network.opendaylight.agent.commands.StartupOpenDaylightControllerCommand; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; @Component public class OpendaylightElement extends AdapterBase implements ConnectivityProvider, ResourceStateAdapter { @@ -156,7 +153,7 @@ public HostVO createHostVOForDirectConnectAgent(HostVO host, StartupCommand[] st @Override public DeleteHostAnswer deleteHost(HostVO host, boolean isForced, boolean isForceDeleteStorage) throws UnableDeleteHostException { - return new DeleteHostAnswer(true); + return null; } private static Map> setCapabilities() { diff --git a/plugins/storage/image/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackImageStoreDriverImpl.java b/plugins/storage/image/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackImageStoreDriverImpl.java index 8abf802d9de6..5017bf90d7cb 100644 --- a/plugins/storage/image/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackImageStoreDriverImpl.java +++ b/plugins/storage/image/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackImageStoreDriverImpl.java @@ -22,11 +22,6 @@ import javax.inject.Inject; -import com.cloud.agent.api.storage.DeleteEntityDownloadURLCommand; -import com.cloud.host.dao.HostDao; -import com.cloud.storage.Upload; -import org.apache.log4j.Logger; - import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; @@ -35,13 +30,17 @@ import org.apache.cloudstack.storage.image.NfsImageStoreDriverImpl; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import org.apache.cloudstack.storage.image.store.ImageStoreImpl; +import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.CreateEntityDownloadURLCommand; +import com.cloud.agent.api.storage.DeleteEntityDownloadURLCommand; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.NfsTO; import com.cloud.configuration.Config; +import com.cloud.host.dao.HostDao; import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.Upload; import com.cloud.utils.exception.CloudRuntimeException; public class CloudStackImageStoreDriverImpl extends NfsImageStoreDriverImpl { @@ -73,6 +72,7 @@ public String createEntityExtractUrl(DataStore store, String installPath, ImageF String uuid = UUID.randomUUID().toString() + "." + format.getFileExtension(); CreateEntityDownloadURLCommand cmd = new CreateEntityDownloadURLCommand(((ImageStoreEntity)store).getMountPoint(), path, uuid, dataObject == null ? null: dataObject.getTO()); + cmd.setSecUrl(((ImageStoreEntity) store).getUrl()); Answer ans = null; if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; diff --git a/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java b/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java index 7e1486214bcf..6afb4a052af0 100644 --- a/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java +++ b/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java @@ -18,17 +18,16 @@ */ package org.apache.cloudstack.storage.datastore.driver; -import java.net.URL; -import java.util.Map; -import java.util.UUID; - -import javax.inject.Inject; - +import com.cloud.agent.api.storage.DownloadAnswer; +import com.cloud.agent.api.to.DataObjectType; +import com.cloud.agent.api.to.DataStoreTO; +import com.cloud.agent.api.to.SwiftTO; import com.cloud.configuration.Config; +import com.cloud.storage.RegisterVolumePayload; +import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.download.DownloadMonitor; import com.cloud.utils.SwiftUtil; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; - +import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; @@ -36,21 +35,22 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import org.apache.cloudstack.engine.subsystem.api.storage.StorageCacheManager; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.command.DownloadCommand; import org.apache.cloudstack.storage.datastore.db.ImageStoreDetailsDao; import org.apache.cloudstack.storage.image.BaseImageStoreDriverImpl; import org.apache.cloudstack.storage.image.store.ImageStoreImpl; import org.apache.cloudstack.storage.to.TemplateObjectTO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.log4j.Logger; -import com.cloud.agent.api.storage.DownloadAnswer; -import com.cloud.agent.api.to.DataObjectType; -import com.cloud.agent.api.to.DataStoreTO; -import com.cloud.agent.api.to.SwiftTO; -import com.cloud.storage.Storage.ImageFormat; -import com.cloud.template.VirtualMachineTemplate; -import com.cloud.utils.exception.CloudRuntimeException; +import javax.inject.Inject; +import java.net.URL; +import java.util.Map; +import java.util.UUID; public class SwiftImageStoreDriverImpl extends BaseImageStoreDriverImpl { private static final Logger s_logger = Logger.getLogger(SwiftImageStoreDriverImpl.class); @@ -63,6 +63,8 @@ public class SwiftImageStoreDriverImpl extends BaseImageStoreDriverImpl { StorageCacheManager cacheManager; @Inject ConfigurationDao _configDao; + @Inject + private DownloadMonitor _downloadMonitor; @Override public DataStoreTO getStoreTO(DataStore store) { @@ -100,12 +102,28 @@ public String createEntityExtractUrl(DataStore store, String installPath, ImageF @Override public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback callback) { - Long maxTemplateSizeInBytes = getMaxTemplateSizeInBytes(); - VirtualMachineTemplate tmpl = _templateDao.findById(data.getId()); + + DownloadCommand downloadCommand = null; + if (data.getType() == DataObjectType.TEMPLATE) { + Long maxTemplateSizeInBytes = getMaxTemplateSizeInBytes(); + downloadCommand = new DownloadCommand((TemplateObjectTO) (data.getTO()), maxTemplateSizeInBytes); + }else if (data.getType() == DataObjectType.VOLUME){ + Long maxDownloadSizeInBytes = getMaxVolumeSizeInBytes(); + VolumeInfo volumeInfo = (VolumeInfo) data; + RegisterVolumePayload payload = (RegisterVolumePayload) volumeInfo.getpayload(); + ImageFormat format = ImageFormat.valueOf(payload.getFormat()); + downloadCommand = new DownloadCommand((VolumeObjectTO) (data.getTO()), maxDownloadSizeInBytes, payload.getChecksum(), payload.getUrl(), format); + } + + if (downloadCommand == null){ + String errMsg = "Unable to build download command, DataObject is of neither VOLUME or TEMPLATE type"; + s_logger.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + DataStore cacheStore = cacheManager.getCacheStorage(dataStore.getScope()); - DownloadCommand dcmd = new DownloadCommand((TemplateObjectTO)(data.getTO()), maxTemplateSizeInBytes); - dcmd.setCacheStore(cacheStore.getTO()); - dcmd.setProxy(getHttpProxy()); + downloadCommand.setCacheStore(cacheStore.getTO()); + downloadCommand.setProxy(getHttpProxy()); EndPoint ep = _epSelector.select(data); if (ep == null) { @@ -120,11 +138,11 @@ public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCal if (data.getType() == DataObjectType.TEMPLATE) { caller.setCallback(caller.getTarget().createTemplateAsyncCallback(null, null)); + ep.sendMessageAsync(downloadCommand, caller); } else if (data.getType() == DataObjectType.VOLUME) { caller.setCallback(caller.getTarget().createVolumeAsyncCallback(null, null)); + _downloadMonitor.downloadVolumeToStorage(data,caller); } - ep.sendMessageAsync(dcmd, caller); - } } diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java index 703f9a1e4e43..14a61e028577 100644 --- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java @@ -17,6 +17,8 @@ package org.apache.cloudstack.storage.datastore.driver; +import static com.cloud.utils.NumbersUtil.toHumanReadableSize; + import java.io.UnsupportedEncodingException; import java.util.ArrayList; import java.util.HashMap; @@ -37,6 +39,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.storage.command.CommandResult; import org.apache.cloudstack.storage.command.CreateObjectAnswer; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -61,7 +65,6 @@ import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; -import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.ResizeVolumePayload; import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotVO; @@ -82,47 +85,38 @@ import com.google.common.base.Preconditions; import com.google.common.primitives.Ints; -import static com.cloud.utils.NumbersUtil.toHumanReadableSize; - -public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver { +public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver, Configurable { private static final Logger s_logger = Logger.getLogger(DateraPrimaryDataStoreDriver.class); private static final int s_lockTimeInSeconds = 300; private static final int s_lowestHypervisorSnapshotReserve = 10; - - @Inject - private ClusterDao _clusterDao; - @Inject - private ClusterDetailsDao _clusterDetailsDao; - @Inject - private HostDao _hostDao; - @Inject - private SnapshotDao _snapshotDao; - @Inject - private SnapshotDetailsDao _snapshotDetailsDao; - @Inject - private PrimaryDataStoreDao _storagePoolDao; - @Inject - private StoragePoolDetailsDao _storagePoolDetailsDao; - @Inject - private VolumeDao _volumeDao; - @Inject - private VMTemplatePoolDao tmpltPoolDao; - @Inject - private PrimaryDataStoreDao storagePoolDao; - @Inject - private VolumeDetailsDao volumeDetailsDao; - @Inject - private SnapshotDetailsDao snapshotDetailsDao; - @Inject - private VolumeDataFactory volumeDataFactory; + private static final int KBPS_MULTIPLIER = 4; //4k blocks + private static final String SEPERATOR_SNAPSHOT = "::"; + + @Inject private ClusterDao _clusterDao; + @Inject private ClusterDetailsDao _clusterDetailsDao; + @Inject private HostDao _hostDao; + @Inject private SnapshotDao _snapshotDao; + @Inject private SnapshotDetailsDao _snapshotDetailsDao; + @Inject private PrimaryDataStoreDao _storagePoolDao; + @Inject private StoragePoolDetailsDao _storagePoolDetailsDao; + @Inject private VolumeDao _volumeDao; + @Inject private VMTemplatePoolDao tmpltPoolDao; + @Inject private PrimaryDataStoreDao storagePoolDao; + @Inject private VolumeDetailsDao volumeDetailsDao; + @Inject private SnapshotDetailsDao snapshotDetailsDao; + @Inject private VolumeDataFactory volumeDataFactory; + + private static final ConfigKey MaxIopsScalingFactor = new ConfigKey("Advanced", Float.class, "storage.managedstorage.datera.iops.factor", "1.0", + "The amount by which to scale the bandwidth when applying Datera.", true, ConfigKey.Scope.Zone); /** - * Returns a map which lists the capabilities that this storage device can - * offer. Currently supported STORAGE_SYSTEM_SNAPSHOT: Has the ability to create - * native snapshots CAN_CREATE_VOLUME_FROM_SNAPSHOT: Can create new volumes from - * native snapshots. CAN_CREATE_VOLUME_FROM_VOLUME: Device can clone volumes. - * This is used for template caching. + * Returns a map which lists the capabilities that this storage device can offer. Currently supported + * STORAGE_SYSTEM_SNAPSHOT: Has the ability to create native snapshots + * CAN_CREATE_VOLUME_FROM_SNAPSHOT: Can create new volumes from native snapshots. + * CAN_CREATE_VOLUME_FROM_VOLUME: Device can clone volumes. This is used for template caching. + * * @return a Map which determines the capabilities of the driver + * */ @Override public Map getCapabilities() { @@ -131,7 +125,6 @@ public Map getCapabilities() { mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString()); mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString()); mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString()); - mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString()); return mapCapabilities; } @@ -154,7 +147,7 @@ public ChapInfo getChapInfo(DataObject dataObject) { /** * Fetches an App Instance from Datera, throws exception if it doesn't find it - * @param conn Datera Connection + * @param conn Datera Connection * @param appInstanceName Name of the Aplication Instance * @return application instance */ @@ -168,7 +161,7 @@ public DateraObject.AppInstance getDateraAppInstance(DateraObject.DateraConnecti throw new CloudRuntimeException(dateraError.getMessage()); } - if (appInstance == null) { + if (appInstance == null){ throw new CloudRuntimeException("App instance not found " + appInstanceName); } @@ -176,22 +169,19 @@ public DateraObject.AppInstance getDateraAppInstance(DateraObject.DateraConnecti } /** - * Given a {@code dataObject} this function makes sure that the {@code host} has - * access to it. All hosts which are in the same cluster are added to an - * initiator group and that group is assigned to the appInstance. If an - * initiator group does not exist, it is created. If the host does not have an - * initiator registered on dataera, that is created and added to the initiator - * group + * Given a {@code dataObject} this function makes sure that the {@code host} has access to it. + * All hosts which are in the same cluster are added to an initiator group and that group is assigned + * to the appInstance. If an initiator group does not exist, it is created. If the host does not have + * an initiator registered on dataera, that is created and added to the initiator group + * * @param dataObject The volume that needs to be accessed - * @param host The host which needs to access the volume - * @param dataStore Identifies which primary storage the volume resides in + * @param host The host which needs to access the volume + * @param dataStore Identifies which primary storage the volume resides in * @return True if access is granted. False otherwise */ @Override public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) { - s_logger.debug("grantAccess() called"); - Preconditions.checkArgument(dataObject != null, "'dataObject' should not be 'null'"); Preconditions.checkArgument(host != null, "'host' should not be 'null'"); Preconditions.checkArgument(dataStore != null, "'dataStore' should not be 'null'"); @@ -223,20 +213,18 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore List hosts = _hostDao.findByClusterId(clusterId); if (!DateraUtil.hostsSupport_iScsi(hosts)) { - s_logger.debug("hostsSupport_iScsi() :Host does NOT support iscsci"); return false; } // We don't have the initiator group, create one - String initiatorGroupName = DateraUtil.INITIATOR_GROUP_PREFIX + "-" + cluster.getUuid(); - s_logger.debug("Will use initiator group " + String.valueOf(initiatorGroupName)); + String initiatorGroupName = DateraUtil.INITIATOR_GROUP_PREFIX + "-" + cluster.getUuid(); initiatorGroup = DateraUtil.getInitiatorGroup(conn, initiatorGroupName); if (initiatorGroup == null) { - s_logger.debug("create initiator group " + String.valueOf(initiatorGroupName)); + initiatorGroup = DateraUtil.createInitiatorGroup(conn, initiatorGroupName); - // Save it to the DB + //Save it to the DB ClusterDetailsVO clusterDetail = new ClusterDetailsVO(clusterId, initiatorGroupKey, initiatorGroupName); _clusterDetailsDao.persist(clusterDetail); @@ -244,36 +232,20 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore initiatorGroup = DateraUtil.getInitiatorGroup(conn, initiatorGroupName); } - Preconditions.checkNotNull(initiatorGroup, "initiatorGroup should not be Null"); + Preconditions.checkNotNull(initiatorGroup); - // We create an initiator for every host in this cluster and add it to the - // initator group + // We create an initiator for every host in this cluster and add it to the initator group addClusterHostsToInitiatorGroup(conn, clusterId, initiatorGroupName); - // assgin the initiatorgroup to appInstance - + //assgin the initiatorgroup to appInstance if (!isInitiatorGroupAssignedToAppInstance(conn, initiatorGroup, appInstance)) { DateraUtil.assignGroupToAppInstance(conn, initiatorGroupName, appInstanceName); - int retries = DateraUtil.DEFAULT_RETRIES; - while (!isInitiatorGroupAssignedToAppInstance(conn, initiatorGroup, appInstance) && retries > 0) { - Thread.sleep(DateraUtil.POLL_TIMEOUT_MS); - retries--; - } - - Preconditions.checkArgument(isInitiatorGroupAssignedToAppInstance(conn, initiatorGroup, appInstance), - "Initgroup is not assigned to appinstance"); - // FIXME: Sleep anyways - s_logger.debug("sleep " + String.valueOf(DateraUtil.POLL_TIMEOUT_MS) + " msec for ACL to be applied"); - - Thread.sleep(DateraUtil.POLL_TIMEOUT_MS); // ms - s_logger.debug( - "Initiator group " + String.valueOf(initiatorGroupName) + " is assigned to " + appInstanceName); - + DateraUtil.pollAppInstanceAvailable(conn, appInstanceName); } return true; - } catch (DateraObject.DateraError | UnsupportedEncodingException | InterruptedException dateraError) { - s_logger.warn(dateraError.getMessage(), dateraError); + } catch (DateraObject.DateraError | UnsupportedEncodingException dateraError) { + s_logger.warn(dateraError.getMessage(), dateraError ); throw new CloudRuntimeException("Unable to grant access to volume " + dateraError.getMessage()); } finally { lock.unlock(); @@ -281,31 +253,28 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore } } - private void addClusterHostsToInitiatorGroup(DateraObject.DateraConnection conn, long clusterId, - String initiatorGroupName) throws DateraObject.DateraError, UnsupportedEncodingException { + private void addClusterHostsToInitiatorGroup(DateraObject.DateraConnection conn, long clusterId, String initiatorGroupName) throws DateraObject.DateraError, UnsupportedEncodingException { List clusterHosts = _hostDao.findByClusterId(clusterId); DateraObject.InitiatorGroup initiatorGroup = DateraUtil.getInitiatorGroup(conn, initiatorGroupName); for (HostVO host : clusterHosts) { - // check if we have an initiator for the host + //check if we have an initiator for the host String iqn = host.getStorageUrl(); DateraObject.Initiator initiator = DateraUtil.getInitiator(conn, iqn); - String initiatorName = ""; - // initiator can not be found, create it + + //initiator not found, create it if (initiator == null) { - initiatorName = DateraUtil.INITIATOR_PREFIX + "-" + host.getUuid(); + String initiatorName = DateraUtil.INITIATOR_PREFIX + "-" + host.getUuid(); initiator = DateraUtil.createInitiator(conn, initiatorName, iqn); - s_logger.debug("Initiator " + initiatorName + " with " + iqn + "added "); - } + Preconditions.checkNotNull(initiator); if (!DateraUtil.isInitiatorPresentInGroup(initiator, initiatorGroup)) { - s_logger.debug("Add " + initiatorName + " to " + initiatorGroupName); DateraUtil.addInitiatorToGroup(conn, initiator.getPath(), initiatorGroupName); } } @@ -313,23 +282,21 @@ private void addClusterHostsToInitiatorGroup(DateraObject.DateraConnection conn, /** * Checks if an initiator group is assigned to an appInstance - * @param conn Datera connection + * @param conn Datera connection * @param initiatorGroup Initiator group to check - * @param appInstance App Instance - * @return True if initiator group is assigned to app instnace, false otherwise + * @param appInstance App Instance + * @return True if initiator group is assigned to app instnace, false otherwise + * * @throws DateraObject.DateraError */ - private boolean isInitiatorGroupAssignedToAppInstance(DateraObject.DateraConnection conn, - DateraObject.InitiatorGroup initiatorGroup, DateraObject.AppInstance appInstance) - throws DateraObject.DateraError { + private boolean isInitiatorGroupAssignedToAppInstance(DateraObject.DateraConnection conn, DateraObject.InitiatorGroup initiatorGroup, DateraObject.AppInstance appInstance) throws DateraObject.DateraError { - Map assignedInitiatorGroups = DateraUtil - .getAppInstanceInitiatorGroups(conn, appInstance.getName()); + List assignedInitiatorGroups = DateraUtil.getAppInstanceInitiatorGroups(conn, appInstance.getName()); Preconditions.checkNotNull(assignedInitiatorGroups); - for (DateraObject.InitiatorGroup ig : assignedInitiatorGroups.values()) { + for (DateraObject.InitiatorGroup ig : assignedInitiatorGroups) { if (initiatorGroup.getName().equals(ig.getName())) { return true; } @@ -338,16 +305,17 @@ private boolean isInitiatorGroupAssignedToAppInstance(DateraObject.DateraConnect return false; } + /** - * Removes access of the initiator group to which {@code host} belongs from the - * appInstance given by {@code dataObject} + * Removes access of the initiator group to which {@code host} belongs from the appInstance + * given by {@code dataObject} + * * @param dataObject Datera volume - * @param host the host which is currently having access to the volume - * @param dataStore The primary store to which volume belongs + * @param host the host which is currently having access to the volume + * @param dataStore The primary store to which volume belongs */ @Override public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) { - s_logger.debug("revokeAccess() called"); Preconditions.checkArgument(dataObject != null, "'dataObject' should not be 'null'"); Preconditions.checkArgument(host != null, "'host' should not be 'null'"); @@ -367,7 +335,7 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) try { - String initiatorGroupName = DateraUtil.INITIATOR_GROUP_PREFIX + "-" + cluster.getUuid(); + String initiatorGroupName = DateraUtil.INITIATOR_GROUP_PREFIX + "-" + cluster.getUuid(); DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePoolId, _storagePoolDetailsDao); @@ -375,16 +343,16 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) DateraObject.InitiatorGroup initiatorGroup = DateraUtil.getInitiatorGroup(conn, initiatorGroupName); if (initiatorGroup != null && appInstance != null) { - DateraUtil.removeGroupFromAppInstance(conn, initiatorGroupName, appInstanceName); - int retries = DateraUtil.DEFAULT_RETRIES; - while (isInitiatorGroupAssignedToAppInstance(conn, initiatorGroup, appInstance) && retries > 0) { - Thread.sleep(DateraUtil.POLL_TIMEOUT_MS); - retries--; - } + DateraUtil.pollAppInstanceAvailable(conn, appInstanceName); + } + + if (dataObject.getType().equals(DataObjectType.TEMPLATE)) { + //Having the template offline reduces the time taken to clone + DateraUtil.updateAppInstanceAdminState(conn, appInstanceName, DateraObject.AppState.OFFLINE); } - } catch (DateraObject.DateraError | UnsupportedEncodingException | InterruptedException dateraError) { + } catch (DateraObject.DateraError | UnsupportedEncodingException dateraError) { String errMesg = "Error revoking access for Volume : " + dataObject.getId(); s_logger.warn(errMesg, dateraError); throw new CloudRuntimeException(errMesg); @@ -395,10 +363,11 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) } /** - * Returns the size of template on this primary storage. If we already have a - * template on this storage, we return 0 + * Returns the size of template on this primary storage. If we already have a template on this + * storage, we return 0 + * * @param templateInfo Information about the template - * @param storagePool The pool where we want to store the template + * @param storagePool The pool where we want to store the template * @return Size in bytes */ @Override @@ -409,72 +378,53 @@ public long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool s if (lstTemplatePoolRefs != null) { for (VMTemplateStoragePoolVO templatePoolRef : lstTemplatePoolRefs) { if (templatePoolRef.getTemplateId() == templateInfo.getId()) { - // This indicates that we already have this template stored on this primary - // storage, so + // This indicates that we already have this template stored on this primary storage, so // we do not require additional space. return 0; } } } - // This indicates that we do not have a copy of this template on this primary - // storage, so - // we need to take it into consideration from a space standpoint (ex. when a new - // VM is spun + // This indicates that we do not have a copy of this template on this primary storage, so + // we need to take it into consideration from a space standpoint (ex. when a new VM is spun // up and wants to use this particular template for its root disk). return getDataObjectSizeIncludingHypervisorSnapshotReserve(templateInfo, storagePool); } - /** - * Returns Datera appInstanceName - * @param dataObject volume or template - * @return Derived Datera appInstanceName based on dataObject, Eg. - * CS-V-ROOT-123-6db58e3f-14c4-45ac-95e9-60e3a00ce7d0 - */ private String getAppInstanceName(DataObject dataObject) { - ArrayList name = new ArrayList<>(); - name.add(DateraUtil.APPINSTANCE_PREFIX); // CS + name.add(DateraUtil.APPINSTANCE_PREFIX); + name.add(dataObject.getType().toString()); + name.add(dataObject.getUuid()); - String dataObjectTypeString = dataObject.getType().name(); // TEMPLATE, VOLUME, SNAPSHOT - String dataObjectTypeBrief; - dataObjectTypeBrief = StringUtils.substring(dataObjectTypeString, 0, 1); - name.add(dataObjectTypeBrief); // T, V - - switch (dataObject.getType()) { - case TEMPLATE: - TemplateInfo templateInfo = (TemplateInfo) dataObject; - - name.add(dataObject.getUuid()); // 6db58e3f-14c4-45ac-95e9-60e3a00ce7d0 - - // For cached templates, we will also add the storage pool ID + if (dataObject.getType() == DataObjectType.TEMPLATE){ + //For cached templates, we will also add the pool ID name.add(String.valueOf(dataObject.getDataStore().getId())); - break; - - case VOLUME: - VolumeInfo volumeInfo = (VolumeInfo) dataObject; - String volumeName = volumeInfo.getName(); - name.add(String.valueOf(volumeName)); - name.add(dataObject.getUuid()); // 6db58e3f-14c4-45ac-95e9-60e3a00ce7d0 - - VolumeVO volumeVo = _volumeDao.findById(dataObject.getId()); - s_logger.debug("volumeName : " + volumeName); - break; + } - case SNAPSHOT: - name.add(dataObject.getUuid()); // 6db58e3f-14c4-45ac-95e9-60e3a00ce7d0 + return StringUtils.join("-", name.toArray()); + } + private String getDescription(DataObject dataObject) { + String desc = "CSAccountId-"; + switch (dataObject.getType()) { + case VOLUME: + desc += Long.toString(((VolumeInfo) (dataObject)).getAccountId()); + break; + case TEMPLATE: + desc+= Long.toString(((TemplateInfo)(dataObject)).getAccountId()); + break; + case SNAPSHOT: + desc+= Long.toString(((SnapshotInfo)(dataObject)).getAccountId()); + break; } - - String appInstanceName = StringUtils.join("-", name.toArray()); - return StringUtils.substring(appInstanceName, 0, DateraUtil.APPINSTANCE_MAX_LENTH); + return desc; } // Not being used right now as Datera doesn't support min IOPS private long getDefaultMinIops(long storagePoolId) { - StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, - DateraUtil.CLUSTER_DEFAULT_MIN_IOPS); + StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, DateraUtil.CLUSTER_DEFAULT_MIN_IOPS); String clusterDefaultMinIops = storagePoolDetail.getValue(); @@ -484,12 +434,10 @@ private long getDefaultMinIops(long storagePoolId) { /** * If user doesn't specify the IOPS, use this IOPS * @param storagePoolId the primary storage - * @return default max IOPS for this storage configured when the storage is - * added + * @return default max IOPS for this storage configured when the storage is added */ private long getDefaultMaxIops(long storagePoolId) { - StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, - DateraUtil.CLUSTER_DEFAULT_MAX_IOPS); + StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, DateraUtil.CLUSTER_DEFAULT_MAX_IOPS); String clusterDefaultMaxIops = storagePoolDetail.getValue(); @@ -497,14 +445,12 @@ private long getDefaultMaxIops(long storagePoolId) { } /** - * Return the default number of replicas to use (configured at storage addition - * time) + * Return the default number of replicas to use (configured at storage addition time) * @param storagePoolId the primary storage * @return the number of replicas to use */ private int getNumReplicas(long storagePoolId) { - StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, - DateraUtil.NUM_REPLICAS); + StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, DateraUtil.NUM_REPLICAS); String clusterDefaultReplicas = storagePoolDetail.getValue(); @@ -512,38 +458,6 @@ private int getNumReplicas(long storagePoolId) { } - /** - * Return the default volume placement to use (configured at storage addition - * time) - * @param storagePoolId the primary storage - * @return volume placement string - */ - private String getVolPlacement(long storagePoolId) { - StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, - DateraUtil.VOL_PLACEMENT); - - String clusterDefaultVolPlacement = storagePoolDetail.getValue(); - - return clusterDefaultVolPlacement; - - } - - /** - * Return the default IP pool name to use (configured at storage addition time) - * @param storagePoolId the primary storage - * @return IP pool name - */ - private String getIpPool(long storagePoolId) { - String ipPool = DateraUtil.DEFAULT_IP_POOL; - StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePoolId, DateraUtil.IP_POOL); - if (storagePoolDetail != null) { - ipPool = storagePoolDetail.getValue(); - } - s_logger.debug("ipPool: " + ipPool); - return ipPool; - - } - @Override public long getUsedBytes(StoragePool storagePool) { return getUsedBytes(storagePool, Long.MIN_VALUE); @@ -551,14 +465,15 @@ public long getUsedBytes(StoragePool storagePool) { /** * Get the total space used by all the entities on the storage. + * * Total space = volume space + snapshot space + template space - * @param storagePool Primary storage - * @param volumeIdToIgnore Ignore this volume (used when we delete a volume and - * want to update the space) + * + * @param storagePool Primary storage + * @param volumeIdToIgnore Ignore this volume (used when we delete a volume and want to update the space) * @return size in bytes */ private long getUsedBytes(StoragePool storagePool, long volumeIdToIgnore) { - long usedSpaceBytes = 0; + long usedSpace = 0; List lstVolumes = _volumeDao.findByPoolId(storagePool.getId(), null); @@ -571,18 +486,20 @@ private long getUsedBytes(StoragePool storagePool, long volumeIdToIgnore) { VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volume.getId(), DateraUtil.VOLUME_SIZE); if (volumeDetail != null && volumeDetail.getValue() != null) { - long volumeSizeGib = Long.parseLong(volumeDetail.getValue()); - long volumeSizeBytes = DateraUtil.gibToBytes((int) (volumeSizeGib)); - usedSpaceBytes += volumeSizeBytes; - } else { - DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePool.getId(), - _storagePoolDetailsDao); + long volumeSize = Long.parseLong(volumeDetail.getValue()); + + usedSpace += volumeSize; + } + else { try { + DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePool.getId(), _storagePoolDetailsDao); String appInstanceName = getAppInstanceName(volumeDataFactory.getVolume(volume.getId())); DateraObject.AppInstance appInstance = DateraUtil.getAppInstance(conn, appInstanceName); if (appInstance != null) { - usedSpaceBytes += DateraUtil.gibToBytes(appInstance.getSize()); + long size = DateraUtil.gbToBytes(appInstance.getSize()); + usedSpace += size; + updateVolumeDetails(volume.getId(), size); } } catch (DateraObject.DateraError dateraError) { String errMesg = "Error getting used bytes for storage pool : " + storagePool.getId(); @@ -593,22 +510,21 @@ private long getUsedBytes(StoragePool storagePool, long volumeIdToIgnore) { } } + List lstSnapshots = _snapshotDao.listAll(); if (lstSnapshots != null) { for (SnapshotVO snapshot : lstSnapshots) { - SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshot.getId(), - DateraUtil.STORAGE_POOL_ID); + SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshot.getId(), DateraUtil.STORAGE_POOL_ID); // if this snapshot belongs to the storagePool that was passed in - if (snapshotDetails != null && snapshotDetails.getValue() != null - && Long.parseLong(snapshotDetails.getValue()) == storagePool.getId()) { + if (snapshotDetails != null && snapshotDetails.getValue() != null && Long.parseLong(snapshotDetails.getValue()) == storagePool.getId()) { snapshotDetails = _snapshotDetailsDao.findDetail(snapshot.getId(), DateraUtil.VOLUME_SIZE); if (snapshotDetails != null && snapshotDetails.getValue() != null) { long snapshotSize = Long.parseLong(snapshotDetails.getValue()); - usedSpaceBytes += snapshotSize; + usedSpace += snapshotSize; } } } @@ -618,17 +534,17 @@ private long getUsedBytes(StoragePool storagePool, long volumeIdToIgnore) { if (lstTemplatePoolRefs != null) { for (VMTemplateStoragePoolVO templatePoolRef : lstTemplatePoolRefs) { - usedSpaceBytes += templatePoolRef.getTemplateSize(); + usedSpace += templatePoolRef.getTemplateSize(); } } - s_logger.debug("usedSpaceBytes: " + toHumanReadableSize(usedSpaceBytes)); + s_logger.debug("usedSpaceBytes: " + toHumanReadableSize(usedSpace)); - return usedSpaceBytes; + return usedSpace; } /** - * Get total IOPS used by the storage array. Since Datera doesn't support min - * IOPS, return zero for now + * Get total IOPS used by the storage array. Since Datera doesn't support min IOPS, + * return zero for now * @param storagePool primary storage * @return total IOPS used */ @@ -639,10 +555,9 @@ public long getUsedIops(StoragePool storagePool) { } /** - * Rreturns the size of the volume including the hypervisor snapshot reserve - * (HSR). + * Rreturns the size of the volume including the hypervisor snapshot reserve (HSR). * @param dataObject Volume or a Template - * @param pool primary storage where it resides + * @param pool primary storage where it resides * @return size in bytes */ @@ -652,41 +567,33 @@ public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataO long volumeSize = 0; switch (dataObject.getType()) { - case VOLUME: + case VOLUME: + VolumeInfo volume = (VolumeInfo) dataObject; + volumeSize = volume.getSize(); + Integer hypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve(); - VolumeInfo volume = (VolumeInfo) dataObject; - volumeSize = volume.getSize(); - Integer hypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve(); + if (hypervisorSnapshotReserve != null) { + hypervisorSnapshotReserve = Math.max(hypervisorSnapshotReserve, s_lowestHypervisorSnapshotReserve); + } else { + hypervisorSnapshotReserve = s_lowestHypervisorSnapshotReserve; + } - if (hypervisorSnapshotReserve != null) { - hypervisorSnapshotReserve = Math.max(hypervisorSnapshotReserve, s_lowestHypervisorSnapshotReserve); volumeSize += volumeSize * (hypervisorSnapshotReserve / 100f); - } - s_logger.debug("Volume size: " + toHumanReadableSize(volumeSize)); - break; + break; - case TEMPLATE: - - TemplateInfo templateInfo = (TemplateInfo) dataObject; - long templateSize = templateInfo.getSize() != null ? templateInfo.getSize() : 0; - - if (templateInfo.getHypervisorType() == Hypervisor.HypervisorType.KVM) { - volumeSize = templateSize; - } else { - volumeSize = (long) (templateSize + templateSize * (s_lowestHypervisorSnapshotReserve / 100f)); - } - s_logger.debug("Template volume size:" + toHumanReadableSize(volumeSize)); + case TEMPLATE: - break; + TemplateInfo templateInfo = (TemplateInfo)dataObject; + volumeSize = (long)(templateInfo.getSize() + templateInfo.getSize() * (s_lowestHypervisorSnapshotReserve / 100f)); + break; } return volumeSize; } /** - * Deletes a volume from Datera. If we are using native snapshots, we first - * check if the volume is holding a native snapshot, if it does, then we don't - * delete it from Datera but instead mark it so that when the snapshot is - * deleted, we delete the volume + * Deletes a volume from Datera. If we are using native snapshots, we first check if the volume is holding + * a native snapshot, if it does, then we don't delete it from Datera but instead mark it so that when + * the snapshot is deleted, we delete the volume * * @param volumeInfo The volume which needs to be deleted * @param storagePoolId Primary storage where volume resides @@ -694,19 +601,17 @@ public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataO private void deleteVolume(VolumeInfo volumeInfo, long storagePoolId) { DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePoolId, _storagePoolDetailsDao); - Long volumeStoragePoolId = volumeInfo.getPoolId(); + Long volumeStoragePoolId = volumeInfo.getPoolId(); long volumeId = volumeInfo.getId(); if (volumeStoragePoolId == null) { - return; // this volume was never assigned to a storage pool, so no SAN volume should - // exist for it + return; // this volume was never assigned to a storage pool, so no SAN volume should exist for it } try { - // If there are native snapshots on this appInstance, we want to keep it on - // Datera - // but remove it from cloudstack + //If there are native snapshots on this appInstance, we want to keep it on Datera + //but remove it from cloudstack if (shouldDeleteVolume(volumeId, null)) { DateraUtil.deleteAppInstance(conn, getAppInstanceName(volumeInfo)); } @@ -727,28 +632,30 @@ private void deleteVolume(VolumeInfo volumeInfo, long storagePoolId) { } /** - * given a {@code volumeInfo} and {@code storagePoolId}, creates an App instance - * on Datera. Updates the usedBytes count in the DB for this storage pool. A - * volume could be created in 3 ways + * given a {@code volumeInfo} and {@code storagePoolId}, creates an App instance on Datera. + * Updates the usedBytes count in the DB for this storage pool. A volume could be created in + * 3 ways * - * 1) A fresh volume with no data: New volume created from Cloudstack + * 1) A fresh volume with no data: + * New volume created from Cloudstack * - * 2) A volume created from a native snapshot. This is used when creating volume - * from snapshot and native snapshots are supported + * 2) A volume created from a native snapshot. + * This is used when creating volume from + * snapshot and native snapshots are supported * - * 3) A volume created by cloning from another volume: This is used when - * creating volume from template or volume from snapshot stored as another - * volume when native snapshots are not supported by the hypervisor + * 3) A volume created by cloning from another volume: + * This is used when creating volume from template or + * volume from snapshot stored as another volume when + * native snapshots are not supported by the hypervisor * * - * @param volumeInfo Info about the volume like size,QoS + * @param volumeInfo Info about the volume like size,QoS * @param storagePoolId The pool to create the vo - * @return returns the IQN path which will be used by storage substem + * @return returns the IQN path which will be used by storage substem * */ private String createVolume(VolumeInfo volumeInfo, long storagePoolId) { - s_logger.debug("createVolume() called"); Preconditions.checkArgument(volumeInfo != null, "volumeInfo cannot be null"); Preconditions.checkArgument(storagePoolId > 0, "storagePoolId should be > 0"); @@ -761,79 +668,68 @@ private String createVolume(VolumeInfo volumeInfo, long storagePoolId) { long csSnapshotId = getCsIdForCloning(volumeInfo.getId(), "cloneOfSnapshot"); long csTemplateId = getCsIdForCloning(volumeInfo.getId(), "cloneOfTemplate"); - s_logger.debug("csTemplateId is " + String.valueOf(csTemplateId)); try { if (csSnapshotId > 0) { - // creating volume from snapshot. The snapshot could either be a native snapshot - // or another volume. - s_logger.debug("Creating volume from snapshot "); + //creating volume from snapshot. The snapshot could either be a native snapshot + //or another volume. appInstance = createDateraClone(conn, csSnapshotId, volumeInfo, storagePoolId, DataObjectType.SNAPSHOT); } else if (csTemplateId > 0) { // create volume from template. Invoked when creating new ROOT volume - s_logger.debug("Creating volume from template "); - appInstance = createDateraClone(conn, csTemplateId, volumeInfo, storagePoolId, DataObjectType.TEMPLATE); String appInstanceName = appInstance.getName(); - long volumeSize = getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo, - storagePoolDao.findById(storagePoolId)); + long volumeSize = getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo, storagePoolDao.findById(storagePoolId)); - // expand the template - if (volumeSize > DateraUtil.gibToBytes(appInstance.getSize())) { + //expand the template + if (volumeSize > DateraUtil.gbToBytes(appInstance.getSize())) { // Expand the volume to include HSR depending on the volume's service offering - DateraUtil.updateAppInstanceSize(conn, appInstanceName, DateraUtil.bytesToGib(volumeSize)); + DateraUtil.updateAppInstanceSize(conn, appInstanceName, DateraUtil.bytesToGb(volumeSize)); // refresh appInstance appInstance = DateraUtil.getAppInstance(conn, appInstanceName); Preconditions.checkNotNull(appInstance); - // update IOPS - if ((volumeInfo.getMaxIops() != null) && (volumeInfo.getMaxIops() != appInstance.getTotalIops())) { + + //update IOPS + if (volumeInfo.getMaxIops() != null && volumeInfo.getMaxIops() != toIops(appInstance.getTotalBandwidthKiBps())) { int newIops = Ints.checkedCast(volumeInfo.getMaxIops()); - DateraUtil.updateAppInstanceIops(conn, appInstanceName, newIops); + DateraUtil.updateAppInstanceIops(conn, appInstanceName, toBandwidthKiBps(newIops)); } + // refresh appInstance appInstance = DateraUtil.getAppInstance(conn, appInstanceName); } } else { - // Just create a standard volume - s_logger.debug("Creating a standard volume "); + //Just create a standard volume appInstance = createDateraVolume(conn, volumeInfo, storagePoolId); } - } catch (UnsupportedEncodingException | DateraObject.DateraError e) { + } catch(UnsupportedEncodingException| DateraObject.DateraError e) { String errMesg = "Unable to create Volume Error: " + e.getMessage(); s_logger.warn(errMesg); throw new CloudRuntimeException(errMesg, e); } - if (appInstance == null) { - String errMesg = "appInstance returned null"; - s_logger.warn(errMesg); - throw new CloudRuntimeException(errMesg); - } - Preconditions.checkNotNull(appInstance); + String iqn = appInstance.getIqn(); String iqnPath = DateraUtil.generateIqnPath(iqn); - VolumeVO volumeVo = _volumeDao.findById(volumeInfo.getId()); - s_logger.debug("volume ID : " + volumeInfo.getId()); - s_logger.debug("volume uuid : " + volumeInfo.getUuid()); + VolumeVO volume = _volumeDao.findById(volumeInfo.getId()); - volumeVo.set_iScsiName(iqnPath); - volumeVo.setFolder(appInstance.getName()); - volumeVo.setPoolType(Storage.StoragePoolType.IscsiLUN); - volumeVo.setPoolId(storagePoolId); + volume.set_iScsiName(iqnPath); + volume.setFolder(appInstance.getName()); + volume.setPoolType(Storage.StoragePoolType.IscsiLUN); + volume.setPoolId(storagePoolId); - _volumeDao.update(volumeVo.getId(), volumeVo); + _volumeDao.update(volume.getId(), volume); - updateVolumeDetails(volumeVo.getId(), appInstance.getSize()); + updateVolumeDetails(volume.getId(), appInstance.getSize()); StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId); @@ -847,116 +743,99 @@ private String createVolume(VolumeInfo volumeInfo, long storagePoolId) { return appInstance.getIqn(); } + private Long toIops(Integer totalBandwidthKiBps) { + + if (totalBandwidthKiBps == null) { + return null; + } + + return (long) (Math.round(totalBandwidthKiBps / (KBPS_MULTIPLIER * MaxIopsScalingFactor.value())) + 1); + } + + private int toBandwidthKiBps(int iops) { + return Math.round(iops * KBPS_MULTIPLIER * MaxIopsScalingFactor.value()); + } + /** - * Helper function to create a Datera app instance. Throws an exception if - * unsuccessful - * @param conn Datera connection + * Helper function to create a Datera app instance. Throws an exception if unsuccessful + * @param conn Datera connection * @param volumeInfo Volume information * @param storagePoolId primary storage - * @return The AppInstance which is created + * @return The AppInstance which is created * @throws UnsupportedEncodingException - * @throws DateraObject.DateraError + * @throws DateraObject.DateraError */ - private DateraObject.AppInstance createDateraVolume(DateraObject.DateraConnection conn, VolumeInfo volumeInfo, - long storagePoolId) throws UnsupportedEncodingException, DateraObject.DateraError { + private DateraObject.AppInstance createDateraVolume(DateraObject.DateraConnection conn, VolumeInfo volumeInfo, long storagePoolId) throws UnsupportedEncodingException, DateraObject.DateraError { - s_logger.debug("createDateraVolume() called"); - DateraObject.AppInstance appInstance = null; - try { + int minIops = Ints.checkedCast(getDefaultMinIops(storagePoolId)); + int maxIops = Ints.checkedCast(getDefaultMaxIops(storagePoolId)); - int minIops = Ints.checkedCast( - volumeInfo.getMinIops() != null ? volumeInfo.getMinIops() : getDefaultMinIops(storagePoolId)); - - // int minIops = Ints.checkedCast(volumeInfo.getMinIops()); - - int maxIops = Ints.checkedCast( - volumeInfo.getMaxIops() != null ? volumeInfo.getMaxIops() : getDefaultMaxIops(storagePoolId)); + if (volumeInfo.getMinIops() != null) { + minIops = Ints.checkedCast(volumeInfo.getMinIops()); + } - // int maxIops = Ints.checkedCast(volumeInfo.getMaxIops()); + if (volumeInfo.getMaxIops() != null) { + maxIops = Ints.checkedCast(Math.max(minIops, Ints.checkedCast(volumeInfo.getMaxIops()))); + } - if (maxIops <= 0) { // We don't care about min iops for now - maxIops = Ints.checkedCast(getDefaultMaxIops(storagePoolId)); - } + int replicas = getNumReplicas(storagePoolId); - int replicas = getNumReplicas(storagePoolId); - String volumePlacement = getVolPlacement(storagePoolId); - String ipPool = getIpPool(storagePoolId); + long volumeSizeBytes = getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo, _storagePoolDao.findById(storagePoolId)); + int volumeSizeGb = DateraUtil.bytesToGb(volumeSizeBytes); - long volumeSizeBytes = getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo, - _storagePoolDao.findById(storagePoolId)); - int volumeSizeGib = DateraUtil.bytesToGib(volumeSizeBytes); - if (volumePlacement == null) { - appInstance = DateraUtil.createAppInstance(conn, getAppInstanceName(volumeInfo), volumeSizeGib, maxIops, - replicas); - } else { - appInstance = DateraUtil.createAppInstance(conn, getAppInstanceName(volumeInfo), volumeSizeGib, maxIops, - replicas, volumePlacement, ipPool); - } - } catch (Exception ex) { - s_logger.debug("createDateraVolume() failed"); - s_logger.error(ex); - } - return appInstance; + return DateraUtil.createAppInstance(conn, getAppInstanceName(volumeInfo), getDescription(volumeInfo), volumeSizeGb, toBandwidthKiBps(maxIops), replicas); } /** - * This function creates a new AppInstance on datera by cloning. We can clone - * either from a volume snapshot (in case of native snapshots) or clone from - * another app Instance in case of templates or snapshots as volumes + * This function creates a new AppInstance on datera by cloning. We can clone either from a volume snapshot (in case of native snapshots) + * or clone from another app Instance in case of templates or snapshots as volumes * - * @param conn Datera Connection - * @param dataObjectId The ID of the clone, used to fetch details on how to - * clone - * @param volumeInfo Information about the clone + * @param conn Datera Connection + * @param dataObjectId The ID of the clone, used to fetch details on how to clone + * @param volumeInfo Information about the clone * @param storagePoolId Primary store to create the clone on - * @param dataType Type of the source (snapshot or template) + * @param dataType Type of the source (snapshot or template) * @return The cloned AppInstance */ - private DateraObject.AppInstance createDateraClone(DateraObject.DateraConnection conn, long dataObjectId, - VolumeInfo volumeInfo, long storagePoolId, DataObjectType dataType) - throws UnsupportedEncodingException, DateraObject.DateraError { - - s_logger.debug("createDateraClone() called"); + private DateraObject.AppInstance createDateraClone(DateraObject.DateraConnection conn, long dataObjectId, VolumeInfo volumeInfo, long storagePoolId, DataObjectType dataType) throws UnsupportedEncodingException, DateraObject.DateraError { String clonedAppInstanceName = getAppInstanceName(volumeInfo); String baseAppInstanceName = null; DateraObject.AppInstance appInstance = null; - String ipPool = getIpPool(storagePoolId); if (dataType == DataObjectType.SNAPSHOT) { SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(dataObjectId, DateraUtil.SNAPSHOT_ID); // Clone volume from a snapshot if (snapshotDetails != null && snapshotDetails.getValue() != null) { - s_logger.debug("Clone volume from a snapshot"); - appInstance = DateraUtil.cloneAppInstanceFromSnapshot(conn, clonedAppInstanceName, - snapshotDetails.getValue(), ipPool); + String[] tokens = snapshotDetails.getValue().split(SEPERATOR_SNAPSHOT); + Preconditions.checkArgument(tokens.length == 2); + String srcAppInstanceName = tokens[0]; + String snapshotTime = tokens[1]; + + appInstance = DateraUtil.cloneAppInstanceFromSnapshot(conn, clonedAppInstanceName, getDescription(volumeInfo), srcAppInstanceName, snapshotTime); if (volumeInfo.getMaxIops() != null) { int totalIops = Math.min(DateraUtil.MAX_IOPS, Ints.checkedCast(volumeInfo.getMaxIops())); - DateraUtil.updateAppInstanceIops(conn, clonedAppInstanceName, totalIops); + DateraUtil.updateAppInstanceIops(conn, clonedAppInstanceName, toBandwidthKiBps(totalIops)); appInstance = DateraUtil.getAppInstance(conn, clonedAppInstanceName); } - if (appInstance == null) { - throw new CloudRuntimeException("Unable to create an app instance from snapshot " - + volumeInfo.getId() + " type " + dataType); + if (appInstance == null){ + throw new CloudRuntimeException("Unable to create an app instance from snapshot " + volumeInfo.getId() + " type " + dataType); } return appInstance; } else { - // Clone volume from an appInstance - s_logger.debug("Clone volume from an appInstance"); - + //Clone volume from an appInstance snapshotDetails = snapshotDetailsDao.findDetail(dataObjectId, DateraUtil.VOLUME_ID); baseAppInstanceName = snapshotDetails.getValue(); } } else if (dataType == DataObjectType.TEMPLATE) { - s_logger.debug("Clone volume from a template"); VMTemplateStoragePoolVO templatePoolRef = tmpltPoolDao.findByPoolTemplate(storagePoolId, dataObjectId, null); @@ -966,68 +845,53 @@ private DateraObject.AppInstance createDateraClone(DateraObject.DateraConnection } if (baseAppInstanceName == null) { - throw new CloudRuntimeException( - "Unable to find a base volume to clone " + volumeInfo.getId() + " type " + dataType); + throw new CloudRuntimeException("Unable to find a base volume to clone " + volumeInfo.getId() + " type " + dataType); } - // Clone the app Instance - appInstance = DateraUtil.cloneAppInstanceFromVolume(conn, clonedAppInstanceName, baseAppInstanceName, ipPool); + //Clone the app Instance + appInstance = DateraUtil.cloneAppInstanceFromVolume(conn, clonedAppInstanceName, getDescription(volumeInfo), baseAppInstanceName); - if (dataType == DataObjectType.TEMPLATE) { - // Only update volume parameters if clone from cached template - // Update maxIops - if (volumeInfo.getMaxIops() != null) { - int totalIops = Math.min(DateraUtil.MAX_IOPS, Ints.checkedCast(volumeInfo.getMaxIops())); + if (volumeInfo.getMaxIops() != null) { - DateraUtil.updateAppInstanceIops(conn, clonedAppInstanceName, totalIops); - appInstance = DateraUtil.getAppInstance(conn, clonedAppInstanceName); - } - // Update placementMode - String newPlacementMode = getVolPlacement(storagePoolId); - if (newPlacementMode != null) { - DateraUtil.updateAppInstancePlacement(conn, clonedAppInstanceName, newPlacementMode); - } + int totalIops = Math.min(DateraUtil.MAX_IOPS, Ints.checkedCast(volumeInfo.getMaxIops())); + + DateraUtil.updateAppInstanceIops(conn, clonedAppInstanceName, toBandwidthKiBps(totalIops)); appInstance = DateraUtil.getAppInstance(conn, clonedAppInstanceName); } - if (appInstance == null) { - throw new CloudRuntimeException("Unable to create an app instance from snapshot or template " - + volumeInfo.getId() + " type " + dataType); + + if (appInstance == null){ + throw new CloudRuntimeException("Unable to create an app instance from snapshot " + volumeInfo.getId() + " type " + dataType); } - s_logger.debug("Datera - Cloned " + baseAppInstanceName + " to " + clonedAppInstanceName); return appInstance; } /** - * This function gets invoked when you want to do operations on a snapshot. The - * snapshot could be a native snapshot and you want to create a template out of - * it. Since snapshots don't have an IQN, we create a temp volume for this - * snapshot which will be used to carry out further operations. This function - * also handles deletion of temp volumes. A flag in the snapshot details table - * decides which action is performed. + * This function gets invoked when you want to do operations on a snapshot. + * The snapshot could be a native snapshot and you want to create a template out of it. + * Since snapshots don't have an IQN, we create a temp volume for this snapshot + * which will be used to carry out further operations. This function also handles deletion of + * temp volumes. A flag in the snapshot details table decides which action is performed. * - * @param snapshotInfo snapshot on Datera + * @param snapshotInfo snapshot on Datera * @param storagePoolId primary store ID */ private void createTempVolume(SnapshotInfo snapshotInfo, long storagePoolId) { - s_logger.debug("createTempVolume() from snapshot called"); - String ipPool = getIpPool(storagePoolId); + long csSnapshotId = snapshotInfo.getId(); SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, DateraUtil.SNAPSHOT_ID); if (snapshotDetails == null || snapshotDetails.getValue() == null) { - throw new CloudRuntimeException("'createTempVolume(SnapshotInfo, long)' should not be invoked unless " - + DateraUtil.SNAPSHOT_ID + " exists."); + throw new CloudRuntimeException("'createTempVolume(SnapshotInfo, long)' should not be invoked unless " + DateraUtil.SNAPSHOT_ID + " exists."); } DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePoolId, _storagePoolDetailsDao); snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, "tempVolume"); - if (snapshotDetails != null && snapshotDetails.getValue() != null - && snapshotDetails.getValue().equalsIgnoreCase("create")) { + if (snapshotDetails != null && snapshotDetails.getValue() != null && snapshotDetails.getValue().equalsIgnoreCase("create")) { snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, DateraUtil.SNAPSHOT_ID); String snapshotName = snapshotDetails.getValue(); @@ -1036,11 +900,14 @@ private void createTempVolume(SnapshotInfo snapshotInfo, long storagePoolId) { DateraObject.AppInstance clonedAppInstance; try { - clonedAppInstance = DateraUtil.cloneAppInstanceFromSnapshot(conn, clonedAppInstanceName, snapshotName, - ipPool); - DateraUtil.pollAppInstanceAvailable(conn, clonedAppInstanceName); + + // split the snapshot name to appInstanceName and the snapshot timestamp + String[] tokens = snapshotName.split(SEPERATOR_SNAPSHOT); + Preconditions.checkArgument(tokens.length == 2); + + clonedAppInstance = DateraUtil.cloneAppInstanceFromSnapshot(conn, clonedAppInstanceName, getDescription(snapshotInfo), tokens[0], tokens[1]); } catch (DateraObject.DateraError | UnsupportedEncodingException e) { - String errMesg = "Unable to create temp volume " + csSnapshotId + "Error:" + e.getMessage(); + String errMesg = "Unable to create temp volume " + csSnapshotId + "Error:" + e.getMessage(); s_logger.error(errMesg, e); throw new CloudRuntimeException(errMesg, e); } @@ -1048,16 +915,14 @@ private void createTempVolume(SnapshotInfo snapshotInfo, long storagePoolId) { if (clonedAppInstance == null) { throw new CloudRuntimeException("Unable to clone volume for snapshot " + snapshotName); } - s_logger.debug("Temp app_instance " + clonedAppInstanceName + " created"); + addTempVolumeToDb(csSnapshotId, clonedAppInstanceName); handleSnapshotDetails(csSnapshotId, DiskTO.IQN, DateraUtil.generateIqnPath(clonedAppInstance.getIqn())); - } else if (snapshotDetails != null && snapshotDetails.getValue() != null - && snapshotDetails.getValue().equalsIgnoreCase("delete")) { + } else if (snapshotDetails != null && snapshotDetails.getValue() != null && snapshotDetails.getValue().equalsIgnoreCase("delete")) { snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, DateraUtil.VOLUME_ID); try { - s_logger.debug("Deleting temp app_instance " + snapshotDetails.getValue()); DateraUtil.deleteAppInstance(conn, snapshotDetails.getValue()); } catch (UnsupportedEncodingException | DateraObject.DateraError dateraError) { String errMesg = "Error deleting temp volume " + dateraError.getMessage(); @@ -1068,29 +933,28 @@ private void createTempVolume(SnapshotInfo snapshotInfo, long storagePoolId) { snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, DiskTO.IQN); snapshotDetailsDao.remove(snapshotDetails.getId()); - } else { + } + else { throw new CloudRuntimeException("Invalid state in 'createTempVolume(SnapshotInfo, long)'"); } } /** - * This function gets invoked when we want to create a volume that caches the - * template on the primary storage. This 'template volume' will then be cloned - * to create new ROOT volumes. + * This function gets invoked when we want to create a volume that caches the template on the primary + * storage. This 'template volume' will then be cloned to create new ROOT volumes. * - * @param templateInfo Information about the template like id, size + * @param templateInfo Information about the template like id, size * @param storagePoolId the primary store to create this volume on * @return IQN of the template volume */ public String createTemplateVolume(TemplateInfo templateInfo, long storagePoolId) { - s_logger.debug("createTemplateVolume() as cache template called"); verifySufficientBytesForStoragePool(templateInfo, storagePoolId); DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePoolId, _storagePoolDetailsDao); - String iqn = null; - String appInstanceName = null; + String iqn; + try { long templateSizeBytes = getDataObjectSizeIncludingHypervisorSnapshotReserve(templateInfo, @@ -1098,17 +962,12 @@ public String createTemplateVolume(TemplateInfo templateInfo, long storagePoolId s_logger.debug("cached VM template sizeBytes: " + toHumanReadableSize(templateSizeBytes)); - int templateSizeGib = DateraUtil.bytesToGib(templateSizeBytes); - + int templateSizeGb = DateraUtil.bytesToGb(templateSizeBytes); int templateIops = DateraUtil.MAX_IOPS; int replicaCount = getNumReplicas(storagePoolId); - appInstanceName = getAppInstanceName(templateInfo); - String volumePlacement = getVolPlacement(storagePoolId); - String ipPool = getIpPool(storagePoolId); - s_logger.debug("cached VM template app_instance: " + appInstanceName + " ipPool: " + ipPool + " sizeGib: " + String.valueOf(templateSizeGib)); - DateraObject.AppInstance appInstance = DateraUtil.createAppInstance(conn, appInstanceName, templateSizeGib, - templateIops, replicaCount, volumePlacement, ipPool); + DateraObject.AppInstance appInstance = DateraUtil.createAppInstance(conn, getAppInstanceName(templateInfo), + getDescription(templateInfo), templateSizeGb, toBandwidthKiBps(templateIops), replicaCount); if (appInstance == null) { throw new CloudRuntimeException("Unable to create Template volume " + templateInfo.getId()); @@ -1121,7 +980,7 @@ public String createTemplateVolume(TemplateInfo templateInfo, long storagePoolId templatePoolRef.setInstallPath(DateraUtil.generateIqnPath(iqn)); templatePoolRef.setLocalDownloadPath(appInstance.getName()); - templatePoolRef.setTemplateSize(DateraUtil.gibToBytes(appInstance.getSize())); + templatePoolRef.setTemplateSize(DateraUtil.bytesToGb(appInstance.getSize())); tmpltPoolDao.update(templatePoolRef.getId(), templatePoolRef); @@ -1135,43 +994,35 @@ public String createTemplateVolume(TemplateInfo templateInfo, long storagePoolId storagePoolDao.update(storagePoolId, storagePool); - } catch (UnsupportedEncodingException | DateraObject.DateraError dateraError) { - if (DateraObject.DateraErrorTypes.ConflictError.equals(dateraError)) { - String errMesg = "template app Instance " + appInstanceName + " exists"; - s_logger.debug(errMesg, dateraError); - } else { - String errMesg = "Unable to create template app Instance " + dateraError.getMessage(); - s_logger.error(errMesg, dateraError); - throw new CloudRuntimeException(errMesg, dateraError); - } + } catch (UnsupportedEncodingException | DateraObject.DateraError e) { + String errMesg = "Unable to create app Instance " + e.getMessage(); + s_logger.error(errMesg, e); + throw new CloudRuntimeException(errMesg, e); } + return DateraUtil.generateIqnPath(iqn); } /** - * Entry point into the create logic. The storage subsystem call this method to - * create various data objects (volume/snapshot/template) + * Entry point into the create logic. The storage subsystem call this method to create various + * data objects (volume/snapshot/template) * * @param dataStore * @param dataObject * @param callback */ @Override - public void createAsync(DataStore dataStore, DataObject dataObject, - AsyncCompletionCallback callback) { + public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) { String iqn = null; String errMsg = null; try { if (dataObject.getType() == DataObjectType.VOLUME) { - s_logger.debug("createAsync - creating volume"); - iqn = createVolume((VolumeInfo) dataObject, dataStore.getId()); + iqn = createVolume((VolumeInfo)dataObject, dataStore.getId()); } else if (dataObject.getType() == DataObjectType.SNAPSHOT) { - s_logger.debug("createAsync - creating snapshot"); createTempVolume((SnapshotInfo) dataObject, dataStore.getId()); } else if (dataObject.getType() == DataObjectType.TEMPLATE) { - s_logger.debug("createAsync - creating template"); - iqn = createTemplateVolume((TemplateInfo) dataObject, dataStore.getId()); + iqn = createTemplateVolume((TemplateInfo)dataObject, dataStore.getId()); } else { errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync"; s_logger.error(errMsg); @@ -1187,19 +1038,16 @@ public void createAsync(DataStore dataStore, DataObject dataObject, } if (callback != null) { - CreateCmdResult result = new CreateCmdResult(iqn, new Answer(null, errMsg == null, errMsg)); - result.setResult(errMsg); - callback.complete(result); } } /** * Helper function which updates volume size in the volume_details table - * @param volumeId Volume information - * @param volumeSize Size in GB + * @param volumeId Volume information + * @param volumeSize Size in GB */ private void updateVolumeDetails(long volumeId, long volumeSize) { VolumeDetailVO volumeDetailVo = volumeDetailsDao.findDetail(volumeId, DateraUtil.VOLUME_SIZE); @@ -1214,46 +1062,41 @@ private void updateVolumeDetails(long volumeId, long volumeSize) { /** * Entrypoint for delete operations. * - * @param dataStore Primary storage + * @param dataStore Primary storage * @param dataObject object to delete - * @param callback used for async, complete the callback after the operation - * is done. + * @param callback used for async, complete the callback after the operation is done. */ @Override - public void deleteAsync(DataStore dataStore, DataObject dataObject, - AsyncCompletionCallback callback) { + public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) { String errMsg = null; try { if (dataObject.getType() == DataObjectType.VOLUME) { - s_logger.debug("deleteAsync - deleting volume"); - deleteVolume((VolumeInfo) dataObject, dataStore.getId()); + deleteVolume((VolumeInfo)dataObject, dataStore.getId()); } else if (dataObject.getType() == DataObjectType.SNAPSHOT) { - s_logger.debug("deleteAsync - deleting snapshot"); - deleteSnapshot((SnapshotInfo) dataObject, dataStore.getId()); + deleteSnapshot((SnapshotInfo)dataObject, dataStore.getId()); } else if (dataObject.getType() == DataObjectType.TEMPLATE) { - s_logger.debug("deleteAsync - deleting template"); - deleteTemplate((TemplateInfo) dataObject, dataStore.getId()); + deleteTemplate((TemplateInfo)dataObject, dataStore.getId()); } else { errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to deleteAsync"; } - } catch (Exception ex) { + } + catch (Exception ex) { errMsg = ex.getMessage(); s_logger.error(errMsg); } - CommandResult result = new CommandResult(); - - result.setResult(errMsg); - - callback.complete(result); + if (callback != null) { + CommandResult result = new CommandResult(); + result.setResult(errMsg); + callback.complete(result); + } } @Override - public void copyAsync(DataObject srcData, DataObject destData, - AsyncCompletionCallback callback) { + public void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback callback) { throw new UnsupportedOperationException(); } @@ -1269,17 +1112,15 @@ public boolean canCopy(DataObject srcData, DataObject destData) { } /** - * Entry point for taking a snapshot. A native snpashot is taken if the - * hypervisor supports it, otherwise a volume is created and the data is copied - * via the hypervisor and Cloudstack will treat this volume as a snapshot. + * Entry point for taking a snapshot. A native snpashot is taken if the hypervisor supports it, otherwise + * a volume is created and the data is copied via the hypervisor and Cloudstack will treat this volume as + * a snapshot. * * @param snapshotInfo Snapshot information - * @param callback Async context + * @param callback Async context */ @Override public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback callback) { - s_logger.debug("takeSnapshot() called"); - CreateCmdResult result; try { @@ -1297,64 +1138,59 @@ public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback capacityBytes) { + throw new CloudRuntimeException("Insufficient amount of space remains in this primary storage to take a snapshot"); + } + + storagePool.setUsedBytes(usedBytes); + + SnapshotObjectTO snapshotObjectTo = (SnapshotObjectTO)snapshotInfo.getTO(); if (shouldTakeSnapshot(snapshotInfo.getId())) { DateraObject.VolumeSnapshot volumeSnapshot = DateraUtil.takeVolumeSnapshot(conn, baseAppInstanceName); if (volumeSnapshot == null) { - s_logger.error("Unable to take native snapshot appInstance name:" + baseAppInstanceName - + " volume ID " + volumeInfo.getId()); - throw new CloudRuntimeException("Unable to take native snapshot for volume " + volumeInfo.getId()); + s_logger.error("Unable to take native snapshot appInstance name:" + baseAppInstanceName + " volume ID " + volumeInfo.getId()); + throw new CloudRuntimeException("Unable to take native snapshot for volume " + volumeInfo.getId()); } - String snapshotName = baseAppInstanceName + ":" + volumeSnapshot.getTimestamp(); - updateSnapshotDetails(snapshotInfo.getId(), baseAppInstanceName, snapshotName, storagePoolId, - baseAppInstance.getSize()); + String snapshotName = baseAppInstanceName + SEPERATOR_SNAPSHOT + volumeSnapshot.getTimestamp(); + updateSnapshotDetails(snapshotInfo.getId(), baseAppInstanceName, snapshotName, storagePoolId, baseAppInstance.getSize()); snapshotObjectTo.setPath("DateraSnapshotId=" + snapshotName); - s_logger.info(" snapshot taken: " + snapshotName); } else { - StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId); - - long capacityBytes = storagePool.getCapacityBytes(); - long usedBytes = getUsedBytes(storagePool); - int volumeSizeGib = baseAppInstance.getSize(); - long volumeSizeBytes = DateraUtil.gibToBytes(volumeSizeGib); - String volumePlacement = getVolPlacement(storagePoolId); - String ipPool = getIpPool(storagePoolId); - - usedBytes += volumeSizeBytes; - - if (usedBytes > capacityBytes) { - throw new CloudRuntimeException( - "Insufficient amount of space remains in this primary storage to create a snapshot volume"); - } - String appInstanceName = getAppInstanceName(snapshotInfo); DateraObject.AppInstance snapshotAppInstance = DateraUtil.createAppInstance(conn, appInstanceName, - volumeSizeGib, DateraUtil.MAX_IOPS, getNumReplicas(storagePoolId), volumePlacement, ipPool); + getDescription(snapshotInfo), volumeSizeGb, toBandwidthKiBps(DateraUtil.MAX_IOPS), getNumReplicas(storagePoolId)); snapshotObjectTo.setPath(snapshotAppInstance.getName()); String iqnPath = DateraUtil.generateIqnPath(snapshotAppInstance.getIqn()); - updateSnapshotDetails(snapshotInfo.getId(), snapshotAppInstance.getName(), storagePoolId, - snapshotAppInstance.getSize(), iqnPath); + updateSnapshotDetails(snapshotInfo.getId(), snapshotAppInstance.getName(), storagePoolId, snapshotAppInstance.getSize(), iqnPath); snapshotObjectTo.setPath("DateraVolumeId=" + snapshotAppInstance.getName()); - - storagePool.setUsedBytes(usedBytes); - // update size in storage pool - _storagePoolDao.update(storagePoolId, storagePool); } + //update size in storage pool + _storagePoolDao.update(storagePoolId, storagePool); + CreateObjectAnswer createObjectAnswer = new CreateObjectAnswer(snapshotObjectTo); result = new CreateCmdResult(null, createObjectAnswer); result.setResult(null); - } catch (Exception ex) { + } + catch (Exception ex) { s_logger.debug("Failed to take CloudStack snapshot: " + snapshotInfo.getId(), ex); result = new CreateCmdResult(null, new CreateObjectAnswer(ex.toString())); @@ -1362,72 +1198,87 @@ public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback: - * @param storagePoolId primary storage - * @param snapshotSizeGb snapshotSize - * @param snapshotIqn IQN of snapshot + * @param csSnapshotId Snapshot ID on Cloudstack + * @param snapshotAppInstanceName snapshot name on Datera : + * @param storagePoolId primary storage + * @param snapshotSizeGb snapshotSize + * @param snapshotIqn IQN of snapshot */ - private void updateSnapshotDetails(long csSnapshotId, String snapshotAppInstanceName, long storagePoolId, - long snapshotSizeGb, String snapshotIqn) { - SnapshotDetailsVO snapshotDetail = new SnapshotDetailsVO(csSnapshotId, DateraUtil.VOLUME_ID, - String.valueOf(snapshotAppInstanceName), false); + private void updateSnapshotDetails(long csSnapshotId, String snapshotAppInstanceName, long storagePoolId, long snapshotSizeGb, String snapshotIqn) { + SnapshotDetailsVO snapshotDetail = new SnapshotDetailsVO(csSnapshotId, + DateraUtil.VOLUME_ID, + String.valueOf(snapshotAppInstanceName), + false); _snapshotDetailsDao.persist(snapshotDetail); - snapshotDetail = new SnapshotDetailsVO(csSnapshotId, DateraUtil.STORAGE_POOL_ID, String.valueOf(storagePoolId), + snapshotDetail = new SnapshotDetailsVO(csSnapshotId, + DateraUtil.STORAGE_POOL_ID, + String.valueOf(storagePoolId), false); _snapshotDetailsDao.persist(snapshotDetail); - snapshotDetail = new SnapshotDetailsVO(csSnapshotId, DateraUtil.VOLUME_SIZE, String.valueOf(snapshotSizeGb), + snapshotDetail = new SnapshotDetailsVO(csSnapshotId, + DateraUtil.VOLUME_SIZE, + String.valueOf(snapshotSizeGb), false); _snapshotDetailsDao.persist(snapshotDetail); - snapshotDetail = new SnapshotDetailsVO(csSnapshotId, DiskTO.IQN, snapshotIqn, false); + snapshotDetail = new SnapshotDetailsVO(csSnapshotId, + DiskTO.IQN, + snapshotIqn, + false); _snapshotDetailsDao.persist(snapshotDetail); } @@ -1437,10 +1288,9 @@ private void updateSnapshotDetails(long csSnapshotId, String snapshotAppInstance * @param snapshotInfo snapshot information * @param storagePoolId primary storage * @throws UnsupportedEncodingException - * @throws DateraObject.DateraError + * @throws DateraObject.DateraError */ - private void deleteSnapshot(SnapshotInfo snapshotInfo, long storagePoolId) - throws UnsupportedEncodingException, DateraObject.DateraError { + private void deleteSnapshot(SnapshotInfo snapshotInfo, long storagePoolId) throws UnsupportedEncodingException, DateraObject.DateraError { long csSnapshotId = snapshotInfo.getId(); @@ -1453,19 +1303,22 @@ private void deleteSnapshot(SnapshotInfo snapshotInfo, long storagePoolId) // Native snapshot being used, delete that String snapshotName = snapshotDetails.getValue(); + // split the snapshot name to appInstanceName and the snapshot timestamp + String[] tokens = snapshotName.split(SEPERATOR_SNAPSHOT); + Preconditions.checkArgument(tokens.length == 2); - DateraUtil.deleteVolumeSnapshot(conn, snapshotName); + DateraUtil.deleteVolumeSnapshot(conn, tokens[0], tokens[1]); - // check if the underlying volume needs to be deleted + //check if the underlying volume needs to be deleted SnapshotVO snapshot = _snapshotDao.findById(csSnapshotId); VolumeVO volume = _volumeDao.findById(snapshot.getVolumeId()); if (volume == null) { - // deleted from Cloudstack. Check if other snapshots are using this volume + //deleted from Cloudstack. Check if other snapshots are using this volume volume = _volumeDao.findByIdIncludingRemoved(snapshot.getVolumeId()); - if (shouldDeleteVolume(snapshot.getVolumeId(), snapshot.getId())) { + if(shouldDeleteVolume(snapshot.getVolumeId(), snapshot.getId())) { DateraUtil.deleteAppInstance(conn, volume.getFolder()); } } @@ -1483,29 +1336,27 @@ private void deleteSnapshot(SnapshotInfo snapshotInfo, long storagePoolId) StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); - // getUsedBytes(StoragePool) will not include the snapshot to delete because it - // has already been deleted by this point + // getUsedBytes(StoragePool) will not include the snapshot to delete because it has already been deleted by this point long usedBytes = getUsedBytes(storagePool); storagePool.setUsedBytes(usedBytes < 0 ? 0 : usedBytes); storagePoolDao.update(storagePoolId, storagePool); - } catch (Exception ex) { - s_logger.debug("Error in 'deleteSnapshot(SnapshotInfo, long)'. CloudStack snapshot ID: " + csSnapshotId, - ex); + } + catch (Exception ex) { + s_logger.debug("Error in 'deleteSnapshot(SnapshotInfo, long)'. CloudStack snapshot ID: " + csSnapshotId, ex); throw ex; } } /** * Deletes a template from Datera - * @param templateInfo Information about Template - * @param storagePoolId Primary storage + * @param templateInfo Information about Template + * @param storagePoolId Primary storage * @throws UnsupportedEncodingException - * @throws DateraObject.DateraError + * @throws DateraObject.DateraError */ - private void deleteTemplate(TemplateInfo templateInfo, long storagePoolId) - throws UnsupportedEncodingException, DateraObject.DateraError { + private void deleteTemplate(TemplateInfo templateInfo, long storagePoolId) throws UnsupportedEncodingException, DateraObject.DateraError { try { DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePoolId, _storagePoolDetailsDao); @@ -1520,14 +1371,14 @@ private void deleteTemplate(TemplateInfo templateInfo, long storagePoolId) StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); - // getUsedBytes(StoragePool) will not include the template to delete because the - // "template_spool_ref" table has already been updated by this point + // getUsedBytes(StoragePool) will not include the template to delete because the "template_spool_ref" table has already been updated by this point long usedBytes = getUsedBytes(storagePool); storagePool.setUsedBytes(usedBytes < 0 ? 0 : usedBytes); storagePoolDao.update(storagePoolId, storagePool); - } catch (Exception ex) { + } + catch (Exception ex) { s_logger.debug("Failed to delete template volume. CloudStack template ID: " + templateInfo.getId(), ex); throw ex; @@ -1542,67 +1393,14 @@ private void deleteTemplate(TemplateInfo templateInfo, long storagePoolId) * @throws CloudRuntimeException */ @Override - public void revertSnapshot(SnapshotInfo snapshotInfo, SnapshotInfo snapshotOnPrimaryStore, - AsyncCompletionCallback callback) { - - VolumeInfo volumeInfo = snapshotInfo.getBaseVolume(); - VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId()); - - long storagePoolId = volumeVO.getPoolId(); - long csSnapshotId = snapshotInfo.getId(); - s_logger.info("Datera - restoreVolumeSnapshot from snapshotId " + String.valueOf(csSnapshotId) + " to volume" - + volumeVO.getName()); - - DateraObject.AppInstance appInstance; - - try { - - if (volumeVO == null || volumeVO.getRemoved() != null) { - String errMsg = "The volume that the snapshot belongs to no longer exists."; - - CommandResult commandResult = new CommandResult(); - - commandResult.setResult(errMsg); - - callback.complete(commandResult); - - return; - } - - DateraObject.DateraConnection conn = DateraUtil.getDateraConnection(storagePoolId, _storagePoolDetailsDao); - - SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, DateraUtil.SNAPSHOT_ID); - - if (snapshotDetails != null && snapshotDetails.getValue() != null) { - // Native snapshot being used, restore snapshot from Datera AppInstance - - String snapshotName = snapshotDetails.getValue(); - - s_logger.info("Datera - restoreVolumeSnapshot: " + snapshotName); - - appInstance = DateraUtil.restoreVolumeSnapshot(conn, snapshotName); - - Preconditions.checkNotNull(appInstance); - - updateVolumeDetails(volumeInfo.getId(), appInstance.getSize()); - } - - CommandResult commandResult = new CommandResult(); - - callback.complete(commandResult); - - } catch (Exception ex) { - s_logger.debug("Error in 'revertSnapshot()'. CloudStack snapshot ID: " + csSnapshotId, ex); - throw new CloudRuntimeException(ex.getMessage()); - } - + public void revertSnapshot(SnapshotInfo snapshot, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback callback) { + throw new UnsupportedOperationException("Reverting not supported. Create a template or volume based on the snapshot instead."); } /** - * Resizes a volume on Datera, shrinking is not allowed. Resize also takes into - * account the HSR + * Resizes a volume on Datera, shrinking is not allowed. Resize also takes into account the HSR * @param dataObject volume to resize - * @param callback async context + * @param callback async context */ @Override public void resize(DataObject dataObject, AsyncCompletionCallback callback) { @@ -1610,12 +1408,12 @@ public void resize(DataObject dataObject, AsyncCompletionCallback lstSnapshots = getNonDestroyedSnapshots(csVolumeId); for (SnapshotVO snapshot : lstSnapshots) { - if (snapshotToIgnoreId != null && snapshot.getId() == snapshotToIgnoreId) { + if (snapshotToIgnoreId != null && snapshot.getId() == snapshotToIgnoreId){ continue; } SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(snapshot.getId(), DateraUtil.SNAPSHOT_ID); @@ -1879,4 +1674,14 @@ public boolean isVmTagsNeeded(String tagKey) { @Override public void provideVmTags(long vmId, long volumeId, String tagValue) { } -} + + @Override + public String getConfigComponentName() { + return DateraPrimaryDataStoreDriver.class.getSimpleName(); + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[] {MaxIopsScalingFactor}; + } +} \ No newline at end of file diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java index ff253fc8d181..821dd36fd0d0 100644 --- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java @@ -20,14 +20,9 @@ import com.cloud.agent.api.StoragePoolInfo; import com.cloud.capacity.CapacityManager; -import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; -import com.cloud.dc.ClusterDetailsDao; -import com.cloud.dc.dao.ClusterDao; -import com.cloud.host.Host; import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.resource.ResourceManager; import com.cloud.storage.SnapshotVO; @@ -38,7 +33,6 @@ import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.SnapshotDetailsDao; import com.cloud.storage.dao.SnapshotDetailsVO; -import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -46,7 +40,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.util.DateraUtil; @@ -57,102 +50,40 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.UUID; public class DateraPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle { private static final Logger s_logger = Logger.getLogger(DateraPrimaryDataStoreLifeCycle.class); - @Inject - private CapacityManager _capacityMgr; - @Inject - private DataCenterDao zoneDao; - @Inject - private ClusterDao _clusterDao; - @Inject - private ClusterDetailsDao _clusterDetailsDao; - @Inject - private PrimaryDataStoreDao storagePoolDao; - @Inject - private HostDao _hostDao; - @Inject - private PrimaryDataStoreHelper dataStoreHelper; - @Inject - private ResourceManager _resourceMgr; - @Inject - private SnapshotDao _snapshotDao; - @Inject - private SnapshotDetailsDao _snapshotDetailsDao; - @Inject - private StorageManager _storageMgr; - @Inject - private StoragePoolHostDao _storagePoolHostDao; - @Inject - private StoragePoolAutomation storagePoolAutomation; - + @Inject private CapacityManager _capacityMgr; + @Inject private DataCenterDao zoneDao; + @Inject private PrimaryDataStoreDao storagePoolDao; + @Inject private PrimaryDataStoreHelper dataStoreHelper; + @Inject private ResourceManager _resourceMgr; + @Inject private SnapshotDao _snapshotDao; + @Inject private SnapshotDetailsDao _snapshotDetailsDao; + @Inject private StorageManager _storageMgr; + @Inject private StoragePoolAutomation storagePoolAutomation; + + // invoked to add primary storage that is based on the SolidFire plug-in @Override public DataStore initialize(Map dsInfos) { - String url = (String) dsInfos.get("url"); - Long zoneId = (Long) dsInfos.get("zoneId"); - Long podId = (Long) dsInfos.get("podId"); - Long clusterId = (Long) dsInfos.get("clusterId"); - String storagePoolName = (String) dsInfos.get("name"); - String providerName = (String) dsInfos.get("providerName"); - Long capacityBytes = (Long) dsInfos.get("capacityBytes"); - Long capacityIops = (Long) dsInfos.get("capacityIops"); - String tags = (String) dsInfos.get("tags"); + String url = (String)dsInfos.get("url"); + Long zoneId = (Long)dsInfos.get("zoneId"); + String storagePoolName = (String)dsInfos.get("name"); + String providerName = (String)dsInfos.get("providerName"); + Long capacityBytes = (Long)dsInfos.get("capacityBytes"); + Long capacityIops = (Long)dsInfos.get("capacityIops"); + String tags = (String)dsInfos.get("tags"); @SuppressWarnings("unchecked") - Map details = (Map) dsInfos.get("details"); - String domainName = details.get("domainname"); + Map details = (Map)dsInfos.get("details"); String storageVip = DateraUtil.getStorageVip(url); - int storagePort = DateraUtil.getStoragePort(url); - int numReplicas = DateraUtil.getNumReplicas(url); - String volPlacement = DateraUtil.getVolPlacement(url); - String clusterAdminUsername = DateraUtil.getValue(DateraUtil.CLUSTER_ADMIN_USERNAME, url); - String clusterAdminPassword = DateraUtil.getValue(DateraUtil.CLUSTER_ADMIN_PASSWORD, url); - String uuid; - String randomString; - - PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters(); - // checks if primary datastore is clusterwide. If so, uses the clusterId to set - // the uuid and then sets the podId and clusterId parameters - if (clusterId != null) { - if (podId == null) { - throw new CloudRuntimeException("The Pod ID must be specified."); - } - if (zoneId == null) { - throw new CloudRuntimeException("The Zone ID must be specified."); - } - ClusterVO cluster = _clusterDao.findById(clusterId); - String clusterUuid = cluster.getUuid(); - randomString = DateraUtil.generateUUID(clusterUuid); - // uuid = DateraUtil.PROVIDER_NAME + "_" + cluster.getUuid() + "_" + storageVip - // + "_" + clusterAdminUsername + "_" + numReplicas + "_" + volPlacement; - uuid = DateraUtil.PROVIDER_NAME + "_" + clusterUuid + "_" + randomString; - s_logger.debug("Datera - Setting Datera cluster-wide primary storage uuid to " + uuid); - parameters.setPodId(podId); - parameters.setClusterId(clusterId); - - HypervisorType hypervisorType = getHypervisorTypeForCluster(clusterId); - - if (!isSupportedHypervisorType(hypervisorType)) { - throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type."); - } + DataCenterVO zone = zoneDao.findById(zoneId); - } - // sets the uuid with zoneid in it - else { - DataCenterVO zone = zoneDao.findById(zoneId); - String zoneUuid = zone.getUuid(); - randomString = DateraUtil.generateUUID(zoneUuid); - // uuid = DateraUtil.PROVIDER_NAME + "_" + zone.getUuid() + "_" + storageVip + - // "_" + clusterAdminUsername + "_" + numReplicas + "_" + volPlacement; - uuid = DateraUtil.PROVIDER_NAME + "_" + zoneUuid + "_" + randomString; - - s_logger.debug("Datera - Setting Datera zone-wide primary storage uuid to " + uuid); - } if (capacityBytes == null || capacityBytes <= 0) { throw new IllegalArgumentException("'capacityBytes' must be present and greater than 0."); } @@ -161,17 +92,13 @@ public DataStore initialize(Map dsInfos) { throw new IllegalArgumentException("'capacityIops' must be present and greater than 0."); } - if (domainName == null) { - domainName = "ROOT"; - s_logger.debug("setting the domain to ROOT"); - } - s_logger.debug("Datera - domainName: " + domainName); + PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters(); parameters.setHost(storageVip); parameters.setPort(storagePort); parameters.setPath(DateraUtil.getModifiedUrl(url)); parameters.setType(StoragePoolType.Iscsi); - parameters.setUuid(uuid); + parameters.setUuid(UUID.randomUUID().toString()); parameters.setZoneId(zoneId); parameters.setName(storagePoolName); parameters.setProviderName(providerName); @@ -188,6 +115,10 @@ public DataStore initialize(Map dsInfos) { details.put(DateraUtil.MANAGEMENT_VIP, managementVip); details.put(DateraUtil.MANAGEMENT_PORT, String.valueOf(managementPort)); + + String clusterAdminUsername = DateraUtil.getValue(DateraUtil.CLUSTER_ADMIN_USERNAME, url); + String clusterAdminPassword = DateraUtil.getValue(DateraUtil.CLUSTER_ADMIN_PASSWORD, url); + details.put(DateraUtil.CLUSTER_ADMIN_USERNAME, clusterAdminUsername); details.put(DateraUtil.CLUSTER_ADMIN_PASSWORD, clusterAdminPassword); @@ -201,8 +132,9 @@ public DataStore initialize(Map dsInfos) { lClusterDefaultMinIops = Long.parseLong(clusterDefaultMinIops); } } catch (NumberFormatException ex) { - s_logger.warn("Cannot parse the setting of " + DateraUtil.CLUSTER_DEFAULT_MIN_IOPS - + ", using default value: " + lClusterDefaultMinIops + ". Exception: " + ex); + s_logger.warn("Cannot parse the setting of " + DateraUtil.CLUSTER_DEFAULT_MIN_IOPS + + ", using default value: " + lClusterDefaultMinIops + + ". Exception: " + ex); } try { @@ -212,27 +144,30 @@ public DataStore initialize(Map dsInfos) { lClusterDefaultMaxIops = Long.parseLong(clusterDefaultMaxIops); } } catch (NumberFormatException ex) { - s_logger.warn("Cannot parse the setting of " + DateraUtil.CLUSTER_DEFAULT_MAX_IOPS - + ", using default value: " + lClusterDefaultMaxIops + ". Exception: " + ex); + s_logger.warn("Cannot parse the setting of " + DateraUtil.CLUSTER_DEFAULT_MAX_IOPS + + ", using default value: " + lClusterDefaultMaxIops + + ". Exception: " + ex); } + if (lClusterDefaultMinIops > lClusterDefaultMaxIops) { - throw new CloudRuntimeException("The parameter '" + DateraUtil.CLUSTER_DEFAULT_MIN_IOPS - + "' must be less than or equal to the parameter '" + DateraUtil.CLUSTER_DEFAULT_MAX_IOPS + "'."); + throw new CloudRuntimeException("The parameter '" + DateraUtil.CLUSTER_DEFAULT_MIN_IOPS + "' must be less than or equal to the parameter '" + + DateraUtil.CLUSTER_DEFAULT_MAX_IOPS + "'."); } + int numReplicas = DateraUtil.getNumReplicas(url); + if (numReplicas < DateraUtil.MIN_NUM_REPLICAS || numReplicas > DateraUtil.MAX_NUM_REPLICAS) { - throw new CloudRuntimeException("The parameter '" + DateraUtil.NUM_REPLICAS + "' must be between " - + DateraUtil.CLUSTER_DEFAULT_MAX_IOPS + "' and " + DateraUtil.MAX_NUM_REPLICAS); + throw new CloudRuntimeException("The parameter '" + DateraUtil.NUM_REPLICAS + "' must be between " + + DateraUtil.CLUSTER_DEFAULT_MAX_IOPS + "' and " + DateraUtil.MAX_NUM_REPLICAS); } + details.put(DateraUtil.NUM_REPLICAS, String.valueOf(DateraUtil.getNumReplicas(url))); + details.put(DateraUtil.CLUSTER_DEFAULT_MIN_IOPS, String.valueOf(lClusterDefaultMinIops)); details.put(DateraUtil.CLUSTER_DEFAULT_MAX_IOPS, String.valueOf(lClusterDefaultMaxIops)); - details.put(DateraUtil.NUM_REPLICAS, String.valueOf(DateraUtil.getNumReplicas(url))); - details.put(DateraUtil.VOL_PLACEMENT, String.valueOf(DateraUtil.getVolPlacement(url))); - details.put(DateraUtil.IP_POOL, String.valueOf(DateraUtil.getIpPool(url))); - + // this adds a row in the cloud.storage_pool table for this Datera cluster return dataStoreHelper.createPrimaryDataStore(parameters); } @@ -242,59 +177,17 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis } @Override - public boolean attachCluster(DataStore datastore, ClusterScope scope) { - PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) datastore; - - // check if there is at least one host up in this cluster - List allHosts = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, - primaryDataStoreInfo.getClusterId(), primaryDataStoreInfo.getPodId(), - primaryDataStoreInfo.getDataCenterId()); - - if (allHosts.isEmpty()) { - storagePoolDao.expunge(primaryDataStoreInfo.getId()); - - throw new CloudRuntimeException( - "No host up to associate a storage pool with in cluster " + primaryDataStoreInfo.getClusterId()); - } - - List poolHosts = new ArrayList(); - - for (HostVO host : allHosts) { - try { - _storageMgr.connectHostToSharedPool(host.getId(), primaryDataStoreInfo.getId()); - - poolHosts.add(host); - } catch (Exception e) { - s_logger.warn("Unable to establish a connection between " + host + " and " + primaryDataStoreInfo, e); - } - } - - if (poolHosts.isEmpty()) { - s_logger.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '" - + primaryDataStoreInfo.getClusterId() + "'."); - - storagePoolDao.expunge(primaryDataStoreInfo.getId()); - - throw new CloudRuntimeException("Failed to access storage pool"); - } - - dataStoreHelper.attachCluster(datastore); - - return true; - // throw new UnsupportedOperationException("Only Zone-wide scope is supported - // with the Datera Storage driver"); + public boolean attachCluster(DataStore store, ClusterScope scope) { + return true; // should be ignored for zone-wide-only plug-ins } @Override public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { dataStoreHelper.attachZone(dataStore); - List xenServerHosts = _resourceMgr - .listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.XenServer, scope.getScopeId()); - List vmWareServerHosts = _resourceMgr - .listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.VMware, scope.getScopeId()); - List kvmHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, - scope.getScopeId()); + List xenServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.XenServer, scope.getScopeId()); + List vmWareServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.VMware, scope.getScopeId()); + List kvmHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId()); List hosts = new ArrayList(); hosts.addAll(xenServerHosts); @@ -334,14 +227,11 @@ public boolean deleteDataStore(DataStore store) { if (lstSnapshots != null) { for (SnapshotVO snapshot : lstSnapshots) { - SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshot.getId(), - DateraUtil.STORAGE_POOL_ID); + SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshot.getId(), DateraUtil.STORAGE_POOL_ID); // if this snapshot belongs to the storagePool that was passed in - if (snapshotDetails != null && snapshotDetails.getValue() != null - && Long.parseLong(snapshotDetails.getValue()) == store.getId()) { - throw new CloudRuntimeException( - "This primary storage cannot be deleted because it currently contains one or more snapshots."); + if (snapshotDetails != null && snapshotDetails.getValue() != null && Long.parseLong(snapshotDetails.getValue()) == store.getId()) { + throw new CloudRuntimeException("This primary storage cannot be deleted because it currently contains one or more snapshots."); } } } @@ -365,8 +255,7 @@ public void updateStoragePool(StoragePool storagePool, Map detai long usedBytes = _capacityMgr.getUsedBytes(storagePoolVo); if (capacityBytes < usedBytes) { - throw new CloudRuntimeException( - "Cannot reduce the number of bytes for this storage pool as it would lead to an insufficient number of bytes"); + throw new CloudRuntimeException("Cannot reduce the number of bytes for this storage pool as it would lead to an insufficient number of bytes"); } } @@ -377,8 +266,7 @@ public void updateStoragePool(StoragePool storagePool, Map detai long usedIops = _capacityMgr.getUsedIops(storagePoolVo); if (capacityIops < usedIops) { - throw new CloudRuntimeException( - "Cannot reduce the number of IOPS for this storage pool as it would lead to an insufficient number of IOPS"); + throw new CloudRuntimeException("Cannot reduce the number of IOPS for this storage pool as it would lead to an insufficient number of IOPS"); } } } @@ -392,29 +280,4 @@ public void enableStoragePool(DataStore dataStore) { public void disableStoragePool(DataStore dataStore) { dataStoreHelper.disable(dataStore); } - - private HypervisorType getHypervisorTypeForCluster(long clusterId) { - ClusterVO cluster = _clusterDao.findById(clusterId); - - if (cluster == null) { - throw new CloudRuntimeException("Cluster ID '" + clusterId + "' was not found in the database."); - } - - return cluster.getHypervisorType(); - } - - private static boolean isSupportedHypervisorType(HypervisorType hypervisorType) { - return HypervisorType.XenServer.equals(hypervisorType) || HypervisorType.VMware.equals(hypervisorType) - || HypervisorType.KVM.equals(hypervisorType); - } - - private HypervisorType getHypervisorType(long hostId) { - HostVO host = _hostDao.findById(hostId); - - if (host != null) { - return host.getHypervisorType(); - } - - return HypervisorType.None; - } -} \ No newline at end of file +} diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java index 99d0758a96a1..f28dd5e4a8aa 100644 --- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java @@ -84,10 +84,6 @@ public boolean hostConnect(long hostId, long storagePoolId) { HostVO host = _hostDao.findById(hostId); - if (host == null) { - s_logger.error("Failed to add host by HostListener as host was not found with id : " + hostId); - return false; - } StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId); if (storagePoolHost == null) { @@ -100,7 +96,6 @@ public boolean hostConnect(long hostId, long storagePoolId) { handleXenServer(host.getClusterId(), host.getId(), storagePoolId); } else if (host.getHypervisorType().equals(HypervisorType.KVM)) { - //handleKVM(host.getClusterId(), host.getId(), storagePoolId); handleKVM(hostId, storagePoolId); } @@ -220,20 +215,6 @@ private void handleVMware(HostVO host, boolean add) { } } - private void handleKVM(long clusterId, long hostId, long storagePoolId) { - List storagePaths = getStoragePaths(clusterId, storagePoolId); - - StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); - - for (String storagePath : storagePaths) { - ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool); - - cmd.setStoragePath(storagePath); - - sendModifyStoragePoolCommand(cmd, storagePool, hostId); - } - } - private void handleKVM(long hostId, long storagePoolId) { StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); @@ -257,10 +238,10 @@ private List getStoragePaths(long clusterId, long storagePoolId) { Long hostIdForVm = vmInstance.getHostId() != null ? vmInstance.getHostId() : vmInstance.getLastHostId(); - if (hostIdForVm != null ) { + if (hostIdForVm != null) { HostVO hostForVm = _hostDao.findById(hostIdForVm); - if (hostForVm.getClusterId().equals(clusterId)) { + if (hostForVm != null && hostForVm.getClusterId().equals(clusterId)) { storagePaths.add(volume.get_iScsiName()); } } @@ -302,7 +283,7 @@ private void sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StorageP _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg); throw new CloudRuntimeException("Unable to establish a connection from agent to storage pool " + storagePool.getId() + " due to " + answer.getDetails() + - " (" + storagePool.getId() + ")"); + " (" + storagePool.getId() + ")"); } assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected ; Pool = " + storagePool.getId() + " Host = " + hostId; diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/util/DateraObject.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/util/DateraObject.java index 43dcb78bb67a..86a082d8be40 100644 --- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/util/DateraObject.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/util/DateraObject.java @@ -14,64 +14,78 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. - +// package org.apache.cloudstack.storage.datastore.util; import com.google.gson.annotations.SerializedName; -import java.util.HashMap; +import java.io.UnsupportedEncodingException; +import java.util.ArrayList; import java.util.List; -import java.util.Map; import org.apache.commons.lang3.StringUtils; public class DateraObject { - public static final String DEFAULT_CREATE_MODE = "cloudstack"; public static final String DEFAULT_STORAGE_NAME = "storage-1"; public static final String DEFAULT_VOLUME_NAME = "volume-1"; public static final String DEFAULT_ACL = "deny_all"; - public static final String DEFAULT_STORAGE_FORCE_BOOLEAN = "true"; public enum AppState { ONLINE, OFFLINE; @Override - public String toString() { + public String toString(){ return this.name().toLowerCase(); } } + public enum DateraOperation { ADD, REMOVE; @Override - public String toString() { + public String toString(){ return this.name().toLowerCase(); } } public enum DateraErrorTypes { - PermissionDeniedError, InvalidRouteError, AuthFailedError, ValidationFailedError, InvalidRequestError, - NotFoundError, NotConnectedError, InvalidSessionKeyError, DatabaseError, InternalError,ConflictError; + PermissionDeniedError, InvalidRouteError, AuthFailedError, + ValidationFailedError, InvalidRequestError, NotFoundError, + NotConnectedError, InvalidSessionKeyError, DatabaseError, + InternalError; - public boolean equals(DateraError err) { + public boolean equals(DateraError err){ return this.name().equals(err.getName()); } } + public static class DateraApiResponse { + public String path; + public String version; + public String tenant; + public String data; + + public String getResponseObjectString() { + return data; + } + } + public static class DateraConnection { private int managementPort; private String managementIp; private String username; private String password; + private String token; - public DateraConnection(String managementIp, int managementPort, String username, String password) { + public DateraConnection(String managementIp, int managementPort, String username, String password) throws UnsupportedEncodingException, DateraError { this.managementPort = managementPort; this.managementIp = managementIp; this.username = username; this.password = password; + this.token = DateraUtil.login(this); } public int getManagementPort() { @@ -89,6 +103,10 @@ public String getUsername() { public String getPassword() { return password; } + + public String getToken() { + return token; + } } public static class DateraLogin { @@ -115,6 +133,7 @@ public class Access { private String iqn; private List ips; + public Access(String iqn, List ips) { this.iqn = iqn; this.ips = ips; @@ -127,15 +146,16 @@ public String getIqn() { public static class PerformancePolicy { - @SerializedName("total_iops_max") - private Integer totalIops; + @SerializedName("total_bandwidth_max") + private Integer totalBandwidth; + - public PerformancePolicy(int totalIops) { - this.totalIops = totalIops; + public PerformancePolicy(int totalBandwidthKiBps) { + this.totalBandwidth = totalBandwidthKiBps; } - public Integer getTotalIops() { - return totalIops; + public Integer getTotalBandwidth() { + return totalBandwidth; } } @@ -151,33 +171,22 @@ public static class Volume { @SerializedName("performance_policy") private PerformancePolicy performancePolicy; - @SerializedName("placement_mode") - private String placementMode; - @SerializedName("op_state") private String opState; - public Volume(int size, int totalIops, int replicaCount) { + public Volume(int size, int totalBandwidthKiBps, int replicaCount) { this.name = DEFAULT_VOLUME_NAME; this.size = size; this.replicaCount = replicaCount; - this.performancePolicy = new PerformancePolicy(totalIops); - } - - public Volume(int size, int totalIops, int replicaCount, String placementMode) { - this.name = DEFAULT_VOLUME_NAME; - this.size = size; - this.replicaCount = replicaCount; - this.performancePolicy = new PerformancePolicy(totalIops); - this.placementMode = placementMode; + this.performancePolicy = new PerformancePolicy(totalBandwidthKiBps); } public Volume(Integer newSize) { - this.size = newSize; + this.size=newSize; } - public Volume(String newPlacementMode) { - this.placementMode = newPlacementMode; + public Volume(String path) { + this.path=path; } public PerformancePolicy getPerformancePolicy() { @@ -188,11 +197,7 @@ public int getSize() { return size; } - public String getPlacementMode() { - return placementMode; - } - - public String getPath() { + public String getPath(){ return path; } @@ -204,56 +209,36 @@ public String getOpState() { public static class StorageInstance { private final String name = DEFAULT_STORAGE_NAME; - private Map volumes; + private List volumes; private Access access; - private String force; - - @SerializedName("ip_pool") - private String ipPool; - public StorageInstance(int size, int totalIops, int replicaCount) { - Volume volume = new Volume(size, totalIops, replicaCount); - volumes = new HashMap(); - volumes.put(DEFAULT_VOLUME_NAME, volume); + public StorageInstance(int size, int totalBandWidthKiBps, int replicaCount) { + Volume volume = new Volume(size, totalBandWidthKiBps, replicaCount); + volumes = new ArrayList<>(); + volumes.add(volume); } - public StorageInstance(int size, int totalIops, int replicaCount, String placementMode, String ipPool) { - Volume volume = new Volume(size, totalIops, replicaCount, placementMode); - volumes = new HashMap(); - volumes.put(DEFAULT_VOLUME_NAME, volume); - this.ipPool = new StringBuilder("/access_network_ip_pools/").append(ipPool).toString(); - } - - public StorageInstance(int size, int totalIops, int replicaCount, String placementMode, String ipPool, String force) { - Volume volume = new Volume(size, totalIops, replicaCount, placementMode); - volumes = new HashMap(); - volumes.put(DEFAULT_VOLUME_NAME, volume); - this.ipPool = new StringBuilder("/access_network_ip_pools/").append(ipPool).toString(); - this.force = DEFAULT_STORAGE_FORCE_BOOLEAN; - } - - public Access getAccess() { + public Access getAccess(){ return access; } public Volume getVolume() { - return volumes.get(DEFAULT_VOLUME_NAME); + return volumes.get(0); } public int getSize() { return getVolume().getSize(); } - public String getForce() { - return this.force; - } - } public static class AppInstance { private String name; + @SerializedName("descr") + private String description; + @SerializedName("access_control_mode") private String accessControlMode; @@ -261,32 +246,27 @@ public static class AppInstance { private String createMode; @SerializedName("storage_instances") - private Map storageInstances; + private List storageInstances; + + @SerializedName("clone_volume_src") + private Volume cloneVolumeSrc; - @SerializedName("clone_src") - private String cloneSrc; + @SerializedName("clone_snapshot_src") + private VolumeSnapshot cloneSnapshotSrc; @SerializedName("admin_state") private String adminState; + private Boolean force; - public AppInstance(String name, int size, int totalIops, int replicaCount) { - this.name = name; - StorageInstance storageInstance = new StorageInstance(size, totalIops, replicaCount); - this.storageInstances = new HashMap(); - this.storageInstances.put(DEFAULT_STORAGE_NAME, storageInstance); - this.accessControlMode = DEFAULT_ACL; - this.createMode = DEFAULT_CREATE_MODE; - } - public AppInstance(String name, int size, int totalIops, int replicaCount, String placementMode, - String ipPool) { + public AppInstance(String name, String description, int size, int totalBandwidthKiBps, int replicaCount) { this.name = name; - StorageInstance storageInstance = new StorageInstance(size, totalIops, replicaCount, placementMode, ipPool); - this.storageInstances = new HashMap(); - this.storageInstances.put(DEFAULT_STORAGE_NAME, storageInstance); + this.description = description; + StorageInstance storageInstance = new StorageInstance(size, totalBandwidthKiBps, replicaCount); + this.storageInstances = new ArrayList<>(); + this.storageInstances.add(storageInstance); this.accessControlMode = DEFAULT_ACL; - this.createMode = DEFAULT_CREATE_MODE; } public AppInstance(AppState state) { @@ -294,19 +274,36 @@ public AppInstance(AppState state) { this.force = true; } - public AppInstance(String name, String cloneSrc) { + public AppInstance(String name, String description, Volume cloneSrc) { this.name = name; - this.cloneSrc = cloneSrc; + this.description = description; + this.cloneVolumeSrc = cloneSrc; + } + + public AppInstance(String name, String description, VolumeSnapshot cloneSrc) { + this.name = name; + this.description = description; + this.cloneSnapshotSrc = cloneSrc; } public String getIqn() { - StorageInstance storageInstance = storageInstances.get(DEFAULT_STORAGE_NAME); + StorageInstance storageInstance = storageInstances.get(0); return storageInstance.getAccess().getIqn(); } - public int getTotalIops() { - StorageInstance storageInstance = storageInstances.get(DEFAULT_STORAGE_NAME); - return storageInstance.getVolume().getPerformancePolicy().getTotalIops(); + // Commenting this out because we are using bandwidth instead for now + /* public int getTotalIops() { + StorageInstance storageInstance = storageInstances.get(DEFAULT_STORAGE_NAME) ; + PerformancePolicy performancePolicy = storageInstance.getVolume().getPerformancePolicy(); + + return performancePolicy == null? -1 : performancePolicy.getTotalIops(); + }*/ + + public int getTotalBandwidthKiBps() { + StorageInstance storageInstance = storageInstances.get(0) ; + PerformancePolicy performancePolicy = storageInstance.getVolume().getPerformancePolicy(); + + return performancePolicy == null? -1 : performancePolicy.getTotalBandwidth(); } public String getName() { @@ -314,27 +311,22 @@ public String getName() { } public int getSize() { - StorageInstance storageInstance = storageInstances.get(DEFAULT_STORAGE_NAME); + StorageInstance storageInstance = storageInstances.get(0); return storageInstance.getSize(); } - public String getVolumePath() { - StorageInstance storageInstance = storageInstances.get(DEFAULT_STORAGE_NAME); + public String getVolumePath(){ + StorageInstance storageInstance = storageInstances.get(0); return storageInstance.getVolume().getPath(); } - public String getVolumeOpState() { - StorageInstance storageInstance = storageInstances.get(DEFAULT_STORAGE_NAME); + public String getVolumeOpState(){ + StorageInstance storageInstance = storageInstances.get(0); return storageInstance.getVolume().getOpState(); } - } - public static class AccessNetworkIpPool { - @SerializedName("ip_pool") - private String ipPool; - - public AccessNetworkIpPool(String ipPool) { - this.ipPool = new StringBuilder("/access_network_ip_pools/").append(ipPool).toString(); + public String getAdminState() { + return adminState; } } @@ -344,13 +336,15 @@ public static class Initiator { private String name; private String path; private String op; + private boolean force; - public Initiator(String name, String id) { + public Initiator(String name, String id, boolean force) { this.id = id; this.name = name; + this.force = force; } - public Initiator(String path, DateraOperation op) { + public Initiator(String path, DateraOperation op){ this.path = path; this.op = op.toString(); } @@ -363,13 +357,15 @@ public String getPath() { public static class InitiatorGroup { private String name; - private List members; + private List members; private String path; private String op; + private boolean force; - public InitiatorGroup(String name, List members) { + public InitiatorGroup(String name, List members, boolean force) { this.name = name; this.members = members; + this.force = force; } public InitiatorGroup(String path, DateraOperation op) { @@ -385,11 +381,12 @@ public String getName() { return name; } - public List getMembers() { + public List getMembers() { return members; } } + public static class VolumeSnapshot { private String uuid; @@ -399,8 +396,12 @@ public static class VolumeSnapshot { @SerializedName("op_state") private String opState; - VolumeSnapshot(String uuid) { - this.uuid = uuid; + + VolumeSnapshot() { + } + + VolumeSnapshot(String path) { + this.path = path; } public String getTimestamp() { @@ -411,21 +412,11 @@ public String getOpState() { return opState; } - public String getPath() { + public String getPath(){ return path; } } - public static class VolumeSnapshotRestore { - - @SerializedName("restore_point") - private String restorePoint; - - VolumeSnapshotRestore(String restorePoint) { - this.restorePoint = restorePoint; - } - } - public static class DateraError extends Exception { private String name; @@ -450,7 +441,7 @@ public boolean isError() { public String getMessage() { - String errMesg = name + "\n"; + String errMesg = name + "\n"; if (message != null) { errMesg += message + "\n"; } @@ -463,7 +454,7 @@ public String getMessage() { return errMesg; } - public String getName() { + public String getName(){ return name; } } diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/util/DateraUtil.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/util/DateraUtil.java index e6d1e1c8e18f..8577073baa87 100644 --- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/util/DateraUtil.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/util/DateraUtil.java @@ -20,9 +20,10 @@ import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.utils.exception.CloudRuntimeException; -import com.google.common.base.Preconditions; import com.google.gson.Gson; import com.google.gson.GsonBuilder; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; import com.google.gson.reflect.TypeToken; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; @@ -50,18 +51,15 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Objects; import java.util.StringTokenizer; -import java.util.UUID; public class DateraUtil { private static final Logger s_logger = Logger.getLogger(DateraUtil.class); - private static final String API_VERSION = "v2"; + private static final String API_VERSION = "v2.1"; public static final String PROVIDER_NAME = "Datera"; - public static final String DRIVER_VERSION = "4.13.0-v2.0.0"; private static final String HEADER_AUTH_TOKEN = "auth-token"; private static final String HEADER_CONTENT_TYPE = "Content-type"; @@ -72,44 +70,36 @@ public class DateraUtil { public static final String MANAGEMENT_PORT = "mPort"; public static final String STORAGE_PORT = "sPort"; - public static final String DEFAULT_IP_POOL = "default"; private static final int DEFAULT_MANAGEMENT_PORT = 7717; private static final int DEFAULT_STORAGE_PORT = 3260; private static final int DEFAULT_NUM_REPLICAS = 3; - private static final long ONEGIB_BYTES = 1073741824; - - private static final String DEFAULT_VOL_PLACEMENT = "hybrid"; - public static final String CLUSTER_ADMIN_USERNAME = "clusterAdminUsername"; public static final String CLUSTER_ADMIN_PASSWORD = "clusterAdminPassword"; public static final String CLUSTER_DEFAULT_MIN_IOPS = "clusterDefaultMinIops"; public static final String CLUSTER_DEFAULT_MAX_IOPS = "clusterDefaultMaxIops"; public static final String NUM_REPLICAS = "numReplicas"; - public static final String VOL_PLACEMENT = "volPlacement"; public static final String STORAGE_POOL_ID = "DateraStoragePoolId"; public static final String VOLUME_SIZE = "DateraVolumeSize"; public static final String VOLUME_ID = "DateraVolumeId"; public static final String SNAPSHOT_ID = "DateraSnapshotId"; public static final String TEMP_VOLUME_ID = "tempVolumeId"; - public static final String IP_POOL = "ipPool"; - public static final int MAX_IOPS = 10000; // max IOPS that can be assigned to a volume + public static final int MAX_IOPS = 30000; // max IOPS that can be assigned to a volume - public static final String INITIATOR_GROUP_PREFIX = "CS-InitiatorGroup"; - public static final String INITIATOR_PREFIX = "CS-Initiator"; - public static final String APPINSTANCE_PREFIX = "CS"; - public static final int APPINSTANCE_MAX_LENTH = 64; + public static final String INITIATOR_GROUP_PREFIX = "Cloudstack-InitiatorGroup"; + public static final String INITIATOR_PREFIX = "Cloudstack-Initiator"; + public static final String APPINSTANCE_PREFIX = "Cloudstack"; public static final int MIN_NUM_REPLICAS = 1; public static final int MAX_NUM_REPLICAS = 5; - public static final int POLL_TIMEOUT_MS = 3000; + public static final int POLL_TIMEOUT_MS = 1000; public static final String STATE_AVAILABLE = "available"; - public static final int DEFAULT_RETRIES = 10; + public static final int DEFAULT_RETRIES = 6; private static Gson gson = new GsonBuilder().create(); @@ -119,7 +109,6 @@ public class DateraUtil { private String password; private static final String SCHEME_HTTP = "http"; - private static final int UUID_LENGTH = 8; public DateraUtil(String managementIp, int managementPort, String username, String password) { this.managementPort = managementPort; @@ -128,8 +117,7 @@ public DateraUtil(String managementIp, int managementPort, String username, Stri this.password = password; } - public static String login(DateraObject.DateraConnection conn) - throws UnsupportedEncodingException, DateraObject.DateraError { + public static String login(DateraObject.DateraConnection conn) throws UnsupportedEncodingException, DateraObject.DateraError { DateraObject.DateraLogin loginParams = new DateraObject.DateraLogin(conn.getUsername(), conn.getPassword()); HttpPut loginReq = new HttpPut(generateApiUrl("login")); @@ -138,38 +126,34 @@ public static String login(DateraObject.DateraConnection conn) loginReq.setEntity(jsonParams); String response = executeHttp(conn, loginReq); - DateraObject.DateraLoginResponse loginResponse = gson.fromJson(response, - DateraObject.DateraLoginResponse.class); + DateraObject.DateraLoginResponse loginResponse = gson.fromJson(response, DateraObject.DateraLoginResponse.class); return loginResponse.getKey(); } - public static Map getAppInstances(DateraObject.DateraConnection conn) - throws DateraObject.DateraError { + public static List getAppInstances(DateraObject.DateraConnection conn) throws DateraObject.DateraError { HttpGet getAppInstancesReq = new HttpGet(generateApiUrl("app_instances")); String response = null; response = executeApiRequest(conn, getAppInstancesReq); - Type responseType = new TypeToken>() { - }.getType(); + Type responseType = new TypeToken>() {}.getType(); return gson.fromJson(response, responseType); } - public static DateraObject.AppInstance getAppInstance(DateraObject.DateraConnection conn, String name) - throws DateraObject.DateraError { + public static DateraObject.AppInstance getAppInstance(DateraObject.DateraConnection conn, String name) throws DateraObject.DateraError { HttpGet url = new HttpGet(generateApiUrl("app_instances", name)); - String response = null; + String r = null; try { - response = executeApiRequest(conn, url); - return gson.fromJson(response, DateraObject.AppInstance.class); + r = executeApiRequest(conn, url); + return gson.fromJson(r, DateraObject.AppInstance.class); } catch (DateraObject.DateraError dateraError) { - if (DateraObject.DateraErrorTypes.NotFoundError.equals(dateraError)) { + if (DateraObject.DateraErrorTypes.NotFoundError.equals(dateraError)){ return null; } else { throw dateraError; @@ -177,17 +161,19 @@ public static DateraObject.AppInstance getAppInstance(DateraObject.DateraConnect } } - public static DateraObject.PerformancePolicy getAppInstancePerformancePolicy(DateraObject.DateraConnection conn, - String appInstanceName) throws DateraObject.DateraError { + public static DateraObject.PerformancePolicy getAppInstancePerformancePolicy(DateraObject.DateraConnection conn, String appInstanceName) throws DateraObject.DateraError { - HttpGet url = new HttpGet(generateApiUrl("app_instances", appInstanceName, "storage_instances", - DateraObject.DEFAULT_STORAGE_NAME, "volumes", DateraObject.DEFAULT_VOLUME_NAME, "performance_policy")); + HttpGet url = new HttpGet(generateApiUrl( + "app_instances", appInstanceName, + "storage_instances", DateraObject.DEFAULT_STORAGE_NAME, + "volumes", DateraObject.DEFAULT_VOLUME_NAME, + "performance_policy")); try { String response = executeApiRequest(conn, url); return gson.fromJson(response, DateraObject.PerformancePolicy.class); } catch (DateraObject.DateraError dateraError) { - if (DateraObject.DateraErrorTypes.NotFoundError.equals(dateraError)) { + if (DateraObject.DateraErrorTypes.NotFoundError.equals(dateraError)){ return null; } else { throw dateraError; @@ -196,13 +182,15 @@ public static DateraObject.PerformancePolicy getAppInstancePerformancePolicy(Dat } - public static DateraObject.PerformancePolicy createAppInstancePerformancePolicy(DateraObject.DateraConnection conn, - String appInstanceName, int totalIops) throws UnsupportedEncodingException, DateraObject.DateraError { + public static DateraObject.PerformancePolicy createAppInstancePerformancePolicy(DateraObject.DateraConnection conn, String appInstanceName, int totalBandwidthKiBps) throws UnsupportedEncodingException, DateraObject.DateraError { - HttpPost url = new HttpPost(generateApiUrl("app_instances", appInstanceName, "storage_instances", - DateraObject.DEFAULT_STORAGE_NAME, "volumes", DateraObject.DEFAULT_VOLUME_NAME, "performance_policy")); + HttpPost url = new HttpPost(generateApiUrl( + "app_instances", appInstanceName, + "storage_instances", DateraObject.DEFAULT_STORAGE_NAME, + "volumes", DateraObject.DEFAULT_VOLUME_NAME, + "performance_policy")); - DateraObject.PerformancePolicy performancePolicy = new DateraObject.PerformancePolicy(totalIops); + DateraObject.PerformancePolicy performancePolicy = new DateraObject.PerformancePolicy(totalBandwidthKiBps); url.setEntity(new StringEntity(gson.toJson(performancePolicy))); @@ -211,49 +199,56 @@ public static DateraObject.PerformancePolicy createAppInstancePerformancePolicy( return gson.fromJson(response, DateraObject.PerformancePolicy.class); } - public static void updateAppInstanceIops(DateraObject.DateraConnection conn, String appInstance, int totalIops) - throws UnsupportedEncodingException, DateraObject.DateraError { + public static void updateAppInstanceIops(DateraObject.DateraConnection conn, String appInstance, int totalBandWidthKiBps) throws UnsupportedEncodingException, DateraObject.DateraError { if (getAppInstancePerformancePolicy(conn, appInstance) == null) { - createAppInstancePerformancePolicy(conn, appInstance, totalIops); + createAppInstancePerformancePolicy(conn, appInstance, totalBandWidthKiBps); } else { - HttpPut url = new HttpPut( - generateApiUrl("app_instances", appInstance, "storage_instances", DateraObject.DEFAULT_STORAGE_NAME, - "volumes", DateraObject.DEFAULT_VOLUME_NAME, "performance_policy")); + HttpPut url = new HttpPut(generateApiUrl( + "app_instances", appInstance, + "storage_instances", DateraObject.DEFAULT_STORAGE_NAME, + "volumes", DateraObject.DEFAULT_VOLUME_NAME, + "performance_policy")); - DateraObject.PerformancePolicy performancePolicy = new DateraObject.PerformancePolicy(totalIops); + DateraObject.PerformancePolicy performancePolicy = new DateraObject.PerformancePolicy(totalBandWidthKiBps); url.setEntity(new StringEntity(gson.toJson(performancePolicy))); executeApiRequest(conn, url); } } - public static void updateAppInstanceSize(DateraObject.DateraConnection conn, String appInstanceName, int newSize) - throws UnsupportedEncodingException, DateraObject.DateraError { - - HttpPut url = new HttpPut(generateApiUrl("app_instances", appInstanceName, "storage_instances", - DateraObject.DEFAULT_STORAGE_NAME, "volumes", DateraObject.DEFAULT_VOLUME_NAME)); + private static int toBandwidth(int totalIops) { + return totalIops; + } - DateraObject.Volume volume = new DateraObject.Volume(newSize); - url.setEntity(new StringEntity(gson.toJson(volume))); - executeApiRequest(conn, url); + public static void updateAppInstanceSize(DateraObject.DateraConnection conn, String appInstanceName, int newSize) throws UnsupportedEncodingException, DateraObject.DateraError { + try { - } + updateAppInstanceAdminState(conn, appInstanceName, DateraObject.AppState.OFFLINE); + HttpPut url = new HttpPut(generateApiUrl( + "app_instances", appInstanceName, + "storage_instances", DateraObject.DEFAULT_STORAGE_NAME, + "volumes", DateraObject.DEFAULT_VOLUME_NAME)); - public static void updateAppInstancePlacement(DateraObject.DateraConnection conn, String appInstanceName, - String newPlacementMode) throws UnsupportedEncodingException, DateraObject.DateraError { - HttpPut url = new HttpPut(generateApiUrl("app_instances", appInstanceName, "storage_instances", - DateraObject.DEFAULT_STORAGE_NAME, "volumes", DateraObject.DEFAULT_VOLUME_NAME)); + DateraObject.Volume volume = new DateraObject.Volume(newSize); + url.setEntity(new StringEntity(gson.toJson(volume))); + executeApiRequest(conn, url); - DateraObject.Volume volume = new DateraObject.Volume(newPlacementMode); - url.setEntity(new StringEntity(gson.toJson(volume))); - executeApiRequest(conn, url); + } finally { + // bring it back online if something bad happened + try { + updateAppInstanceAdminState(conn, appInstanceName, DateraObject.AppState.ONLINE); + } catch (Exception e){ + s_logger.warn("Error getting appInstance " + appInstanceName + " back online ", e); + } + } } - private static DateraObject.AppInstance createAppInstance(DateraObject.DateraConnection conn, String name, - StringEntity appInstanceEntity) throws DateraObject.DateraError { + + + private static DateraObject.AppInstance createAppInstance(DateraObject.DateraConnection conn, String name, StringEntity appInstanceEntity) throws DateraObject.DateraError { HttpPost createAppInstance = new HttpPost(generateApiUrl("app_instances")); HttpGet getAppInstance = new HttpGet(generateApiUrl("app_instances", name)); @@ -262,66 +257,43 @@ private static DateraObject.AppInstance createAppInstance(DateraObject.DateraCon executeApiRequest(conn, createAppInstance); - // create is async, do a get to fetch the IQN + //create is async, do a get to fetch the IQN executeApiRequest(conn, getAppInstance); - return pollAppInstanceAvailable(conn, name); + return pollVolumeAvailable(conn, name); } - public static DateraObject.AppInstance createAppInstance(DateraObject.DateraConnection conn, String name, int size, - int totalIops, int replicaCount) throws UnsupportedEncodingException, DateraObject.DateraError { + public static DateraObject.AppInstance createAppInstance(DateraObject.DateraConnection conn, String name, String desc, int size, int totalBandwidthKiBps, int replicaCount) throws UnsupportedEncodingException, DateraObject.DateraError { - DateraObject.AppInstance appInstance = new DateraObject.AppInstance(name, size, totalIops, replicaCount, - DEFAULT_VOL_PLACEMENT, DEFAULT_IP_POOL); + DateraObject.AppInstance appInstance = new DateraObject.AppInstance(name, desc, size, totalBandwidthKiBps, replicaCount); StringEntity appInstanceEntity = new StringEntity(gson.toJson(appInstance)); return createAppInstance(conn, name, appInstanceEntity); } - public static DateraObject.AppInstance createAppInstance(DateraObject.DateraConnection conn, String name, int size, - int totalIops, int replicaCount, String placementMode, String ipPool) - throws UnsupportedEncodingException, DateraObject.DateraError { + public static DateraObject.AppInstance cloneAppInstanceFromVolume(DateraObject.DateraConnection conn, String name, String desc, String srcCloneName) throws UnsupportedEncodingException, DateraObject.DateraError { - DateraObject.AppInstance appInstance = new DateraObject.AppInstance(name, size, totalIops, replicaCount, - placementMode, ipPool); - StringEntity appInstanceEntity = new StringEntity(gson.toJson(appInstance)); - - return createAppInstance(conn, name, appInstanceEntity); - } - - public static DateraObject.AppInstance cloneAppInstanceFromVolume(DateraObject.DateraConnection conn, String name, - String srcCloneName) throws UnsupportedEncodingException, DateraObject.DateraError { - return null; - } - - public static DateraObject.AppInstance cloneAppInstanceFromVolume(DateraObject.DateraConnection conn, String name, - String srcCloneName, String ipPool) throws UnsupportedEncodingException, DateraObject.DateraError { - s_logger.debug("cloneAppInstanceFromVolume() called"); DateraObject.AppInstance srcAppInstance = getAppInstance(conn, srcCloneName); - if (srcAppInstance == null) { - throw new DateraObject.DateraError("NotFoundError", 404, null, - "Unable to find the base app instance to clone from"); + if (srcAppInstance == null){ + throw new DateraObject.DateraError("NotFoundError", 404, null, "Unable to find the base app instance to clone from"); } - String srcClonePath = srcAppInstance.getVolumePath(); + String srcClonePath = srcAppInstance.getVolumePath(); + DateraObject.Volume cloneVolume = new DateraObject.Volume(srcClonePath); - DateraObject.AppInstance appInstanceObj = new DateraObject.AppInstance(name, srcClonePath); + DateraObject.AppInstance appInstanceObj = new DateraObject.AppInstance(name, desc, cloneVolume); StringEntity appInstanceEntity = new StringEntity(gson.toJson(appInstanceObj)); DateraObject.AppInstance appInstance = createAppInstance(conn, name, appInstanceEntity); - // Update ipPool - updateAppInstanceIpPool(conn, name, ipPool); - - // bring it online + //bring it online updateAppInstanceAdminState(conn, name, DateraObject.AppState.ONLINE); return getAppInstance(conn, name); } - public static DateraObject.AppInstance pollAppInstanceAvailable(DateraObject.DateraConnection conn, - String appInstanceName) throws DateraObject.DateraError { + public static DateraObject.AppInstance pollVolumeAvailable(DateraObject.DateraConnection conn, String appInstanceName) throws DateraObject.DateraError { int retries = DateraUtil.DEFAULT_RETRIES; DateraObject.AppInstance appInstance = null; @@ -333,25 +305,56 @@ public static DateraObject.AppInstance pollAppInstanceAvailable(DateraObject.Dat return null; } retries--; - } while ((appInstance != null && !Objects.equals(appInstance.getVolumeOpState(), DateraUtil.STATE_AVAILABLE)) - && retries > 0); + } while ((appInstance != null && !Objects.equals(appInstance.getVolumeOpState(), DateraUtil.STATE_AVAILABLE)) && retries>0); return appInstance; } - public static DateraObject.Initiator createInitiator(DateraObject.DateraConnection conn, String name, String iqn) - throws DateraObject.DateraError, UnsupportedEncodingException { + public static DateraObject.AppInstance pollAppInstanceAvailable(DateraObject.DateraConnection conn, String appInstanceName) throws DateraObject.DateraError { - HttpPost req = new HttpPost(generateApiUrl("initiators")); + int retries = DateraUtil.DEFAULT_RETRIES; + DateraObject.AppInstance appInstance = null; + do { + appInstance = getAppInstance(conn, appInstanceName); + try { + Thread.sleep(DateraUtil.POLL_TIMEOUT_MS); + } catch (InterruptedException e) { + return null; + } + retries--; + } while ((appInstance != null && !Objects.equals(appInstance.getAdminState(), DateraObject.AppState.ONLINE.toString())) && retries>0); + return appInstance; + } + + public static List getInitiators(DateraObject.DateraConnection conn) throws DateraObject.DateraError { - DateraObject.Initiator initiator = new DateraObject.Initiator(name, iqn); + HttpGet getInitiators = new HttpGet(generateApiUrl("initiators")); + String response = null; + + response = executeApiRequest(conn, getInitiators); + + Type responseType = new TypeToken>() {}.getType(); + + return gson.fromJson(response, responseType); + } + + public static DateraObject.Initiator createInitiator(DateraObject.DateraConnection conn, String name, String iqn) throws DateraObject.DateraError, UnsupportedEncodingException { + HttpPost req = new HttpPost(generateApiUrl("initiators" )); + boolean force = false; + + //if we don't have any initiators, set the force flag to true. + List initiators = getInitiators(conn); + if (initiators.size() == 0) { + force = true; + } + + DateraObject.Initiator initiator = new DateraObject.Initiator(name, iqn, force); StringEntity httpEntity = new StringEntity(gson.toJson(initiator)); req.setEntity(httpEntity); return gson.fromJson(executeApiRequest(conn, req), DateraObject.Initiator.class); } - public static DateraObject.Initiator getInitiator(DateraObject.DateraConnection conn, String iqn) - throws DateraObject.DateraError { + public static DateraObject.Initiator getInitiator(DateraObject.DateraConnection conn, String iqn) throws DateraObject.DateraError { try { HttpGet getReq = new HttpGet(generateApiUrl("initiators", iqn)); @@ -372,12 +375,18 @@ public static void deleteInitiator(DateraObject.DateraConnection conn, String iq executeApiRequest(conn, req); } - public static DateraObject.InitiatorGroup createInitiatorGroup(DateraObject.DateraConnection conn, String name) - throws UnsupportedEncodingException, DateraObject.DateraError { + public static DateraObject.InitiatorGroup createInitiatorGroup(DateraObject.DateraConnection conn, String name) throws UnsupportedEncodingException, DateraObject.DateraError { HttpPost createReq = new HttpPost(generateApiUrl("initiator_groups")); + boolean force = false; - DateraObject.InitiatorGroup group = new DateraObject.InitiatorGroup(name, Collections.emptyList()); + //if we don't have any initiators, set the force flag to true. + List initiators = getInitiators(conn); + if (initiators.size() == 0) { + force = true; + } + + DateraObject.InitiatorGroup group = new DateraObject.InitiatorGroup(name, Collections.emptyList(), force); StringEntity httpEntity = new StringEntity(gson.toJson(group)); createReq.setEntity(httpEntity); @@ -386,19 +395,16 @@ public static DateraObject.InitiatorGroup createInitiatorGroup(DateraObject.Date return gson.fromJson(response, DateraObject.InitiatorGroup.class); } - public static void deleteInitatorGroup(DateraObject.DateraConnection conn, String name) - throws DateraObject.DateraError { + public static void deleteInitatorGroup(DateraObject.DateraConnection conn, String name) throws DateraObject.DateraError { HttpDelete delReq = new HttpDelete(generateApiUrl("initiator_groups", name)); executeApiRequest(conn, delReq); } - public static DateraObject.InitiatorGroup getInitiatorGroup(DateraObject.DateraConnection conn, String name) - throws DateraObject.DateraError { + public static DateraObject.InitiatorGroup getInitiatorGroup(DateraObject.DateraConnection conn, String name) throws DateraObject.DateraError { try { HttpGet getReq = new HttpGet(generateApiUrl("initiator_groups", name)); - String response = executeApiRequest(conn, getReq); - return gson.fromJson(response, DateraObject.InitiatorGroup.class); - + String resp = executeApiRequest(conn, getReq); + return gson.fromJson(resp, DateraObject.InitiatorGroup.class); } catch (DateraObject.DateraError dateraError) { if (DateraObject.DateraErrorTypes.NotFoundError.equals(dateraError)) { return null; @@ -408,8 +414,7 @@ public static DateraObject.InitiatorGroup getInitiatorGroup(DateraObject.DateraC } } - public static void updateInitiatorGroup(DateraObject.DateraConnection conn, String initiatorPath, String groupName, - DateraObject.DateraOperation op) throws DateraObject.DateraError, UnsupportedEncodingException { + public static void updateInitiatorGroup(DateraObject.DateraConnection conn, String initiatorPath, String groupName, DateraObject.DateraOperation op) throws DateraObject.DateraError, UnsupportedEncodingException { DateraObject.InitiatorGroup initiatorGroup = getInitiatorGroup(conn, groupName); @@ -425,35 +430,32 @@ public static void updateInitiatorGroup(DateraObject.DateraConnection conn, Stri executeApiRequest(conn, addReq); } - public static void addInitiatorToGroup(DateraObject.DateraConnection conn, String initiatorPath, String groupName) - throws UnsupportedEncodingException, DateraObject.DateraError { + public static void addInitiatorToGroup(DateraObject.DateraConnection conn, String initiatorPath, String groupName) throws UnsupportedEncodingException, DateraObject.DateraError { updateInitiatorGroup(conn, initiatorPath, groupName, DateraObject.DateraOperation.ADD); } - public static void removeInitiatorFromGroup(DateraObject.DateraConnection conn, String initiatorPath, - String groupName) throws DateraObject.DateraError, UnsupportedEncodingException { + public static void removeInitiatorFromGroup(DateraObject.DateraConnection conn, String initiatorPath, String groupName) throws DateraObject.DateraError, UnsupportedEncodingException { updateInitiatorGroup(conn, initiatorPath, groupName, DateraObject.DateraOperation.REMOVE); } - public static Map getAppInstanceInitiatorGroups( - DateraObject.DateraConnection conn, String appInstance) throws DateraObject.DateraError { - HttpGet req = new HttpGet(generateApiUrl("app_instances", appInstance, "storage_instances", - DateraObject.DEFAULT_STORAGE_NAME, "acl_policy", "initiator_groups")); + public static List getAppInstanceInitiatorGroups(DateraObject.DateraConnection conn, String appInstance) throws DateraObject.DateraError { + HttpGet req = new HttpGet(generateApiUrl( + "app_instances", appInstance, + "storage_instances", DateraObject.DEFAULT_STORAGE_NAME, + "acl_policy", "initiator_groups")); String response = executeApiRequest(conn, req); - if (response == null) { return null; } - Type responseType = new TypeToken>() { - }.getType(); + + Type responseType = new TypeToken>() {}.getType(); return gson.fromJson(response, responseType); } - public static void assignGroupToAppInstance(DateraObject.DateraConnection conn, String group, String appInstance) - throws DateraObject.DateraError, UnsupportedEncodingException { + public static void assignGroupToAppInstance(DateraObject.DateraConnection conn, String group, String appInstance) throws DateraObject.DateraError, UnsupportedEncodingException { DateraObject.InitiatorGroup initiatorGroup = getInitiatorGroup(conn, group); @@ -461,30 +463,37 @@ public static void assignGroupToAppInstance(DateraObject.DateraConnection conn, throw new CloudRuntimeException("Initator group " + group + " not found "); } - Map initiatorGroups = getAppInstanceInitiatorGroups(conn, appInstance); + List initiatorGroups = getAppInstanceInitiatorGroups(conn, appInstance); if (initiatorGroups == null) { throw new CloudRuntimeException("Initator group not found for appInstnace " + appInstance); } - for (DateraObject.InitiatorGroup ig : initiatorGroups.values()) { + for(DateraObject.InitiatorGroup ig : initiatorGroups) { if (ig.getName().equals(group)) { - // already assigned + //already assigned return; } } - HttpPut url = new HttpPut(generateApiUrl("app_instances", appInstance, "storage_instances", - DateraObject.DEFAULT_STORAGE_NAME, "acl_policy", "initiator_groups")); + HttpPut url = new HttpPut(generateApiUrl( + "app_instances", appInstance, + "storage_instances", DateraObject.DEFAULT_STORAGE_NAME, + "acl_policy", "initiator_groups")); - url.setEntity(new StringEntity(gson - .toJson(new DateraObject.InitiatorGroup(initiatorGroup.getPath(), DateraObject.DateraOperation.ADD)))); + url.setEntity(new StringEntity( + gson.toJson( + new DateraObject.InitiatorGroup( + initiatorGroup.getPath(), + DateraObject.DateraOperation.ADD + ) + ) + )); executeApiRequest(conn, url); } - public static void removeGroupFromAppInstance(DateraObject.DateraConnection conn, String group, String appInstance) - throws DateraObject.DateraError, UnsupportedEncodingException { + public static void removeGroupFromAppInstance(DateraObject.DateraConnection conn, String group, String appInstance) throws DateraObject.DateraError, UnsupportedEncodingException { DateraObject.InitiatorGroup initiatorGroup = getInitiatorGroup(conn, group); @@ -492,7 +501,7 @@ public static void removeGroupFromAppInstance(DateraObject.DateraConnection conn throw new CloudRuntimeException("Initator groups not found for appInstnace " + appInstance); } - Map initiatorGroups = getAppInstanceInitiatorGroups(conn, appInstance); + List initiatorGroups = getAppInstanceInitiatorGroups(conn, appInstance); if (initiatorGroups == null) { throw new CloudRuntimeException("Initator group not found for appInstnace " + appInstance); @@ -500,7 +509,7 @@ public static void removeGroupFromAppInstance(DateraObject.DateraConnection conn boolean groupAssigned = false; - for (DateraObject.InitiatorGroup ig : initiatorGroups.values()) { + for(DateraObject.InitiatorGroup ig : initiatorGroups) { if (ig.getName().equals(group)) { groupAssigned = true; break; @@ -508,20 +517,27 @@ public static void removeGroupFromAppInstance(DateraObject.DateraConnection conn } if (!groupAssigned) { - return; // already removed + return; // already removed } - HttpPut url = new HttpPut(generateApiUrl("app_instances", appInstance, "storage_instances", - DateraObject.DEFAULT_STORAGE_NAME, "acl_policy", "initiator_groups")); + HttpPut url = new HttpPut(generateApiUrl( + "app_instances", appInstance, + "storage_instances", DateraObject.DEFAULT_STORAGE_NAME, + "acl_policy", "initiator_groups")); - url.setEntity(new StringEntity(gson.toJson( - new DateraObject.InitiatorGroup(initiatorGroup.getPath(), DateraObject.DateraOperation.REMOVE)))); + url.setEntity(new StringEntity( + gson.toJson( + new DateraObject.InitiatorGroup( + initiatorGroup.getPath(), + DateraObject.DateraOperation.REMOVE + ) + ) + )); executeApiRequest(conn, url); } - public static void updateAppInstanceAdminState(DateraObject.DateraConnection conn, String appInstanceName, - DateraObject.AppState appState) throws UnsupportedEncodingException, DateraObject.DateraError { + public static void updateAppInstanceAdminState(DateraObject.DateraConnection conn, String appInstanceName, DateraObject.AppState appState) throws UnsupportedEncodingException, DateraObject.DateraError { DateraObject.AppInstance appInstance = new DateraObject.AppInstance(appState); HttpPut updateAppInstanceReq = new HttpPut(generateApiUrl("app_instances", appInstanceName)); @@ -530,109 +546,67 @@ public static void updateAppInstanceAdminState(DateraObject.DateraConnection con executeApiRequest(conn, updateAppInstanceReq); } - public static void updateAppInstanceIpPool(DateraObject.DateraConnection conn, String appInstanceName, - String ipPool) throws UnsupportedEncodingException, DateraObject.DateraError { - - HttpPut url = new HttpPut(generateApiUrl("app_instances", appInstanceName, "storage_instances", - DateraObject.DEFAULT_STORAGE_NAME)); - - url.setEntity(new StringEntity(gson.toJson(new DateraObject.AccessNetworkIpPool(ipPool)))); - - executeApiRequest(conn, url); - } - - public static void deleteAppInstance(DateraObject.DateraConnection conn, String name) - throws UnsupportedEncodingException, DateraObject.DateraError { + public static void deleteAppInstance(DateraObject.DateraConnection conn, String name) throws UnsupportedEncodingException, DateraObject.DateraError { HttpDelete deleteAppInstanceReq = new HttpDelete(generateApiUrl("app_instances", name)); updateAppInstanceAdminState(conn, name, DateraObject.AppState.OFFLINE); executeApiRequest(conn, deleteAppInstanceReq); } - public static DateraObject.AppInstance cloneAppInstanceFromSnapshot(DateraObject.DateraConnection conn, - String newAppInstanceName, String snapshotName) - throws DateraObject.DateraError, UnsupportedEncodingException { - - return cloneAppInstanceFromSnapshot(conn, newAppInstanceName, snapshotName, DEFAULT_IP_POOL); - } - - public static DateraObject.AppInstance cloneAppInstanceFromSnapshot(DateraObject.DateraConnection conn, - String newAppInstanceName, String snapshotName, String ipPool) - throws DateraObject.DateraError, UnsupportedEncodingException { - - // split the snapshot name to appInstanceName and the snapshot timestamp - String[] tokens = snapshotName.split(":"); - Preconditions.checkArgument(tokens.length == 2); - - // A snapshot is stored in Cloudstack as : - String appInstanceName = tokens[0]; - String snapshotTime = tokens[1]; - - // get the snapshot from Datera - HttpGet getSnasphotReq = new HttpGet( - generateApiUrl("app_instances", appInstanceName, "storage_instances", DateraObject.DEFAULT_STORAGE_NAME, - "volumes", DateraObject.DEFAULT_VOLUME_NAME, "snapshots", snapshotTime)); + public static DateraObject.AppInstance cloneAppInstanceFromSnapshot(DateraObject.DateraConnection conn, String newAppInstanceName, String desc, String srcAppInstanceName, String snapshotTime) throws DateraObject.DateraError, UnsupportedEncodingException { + //get the snapshot from Datera + HttpGet getSnasphotReq = new HttpGet(generateApiUrl("app_instances", srcAppInstanceName, + "storage_instances", DateraObject.DEFAULT_STORAGE_NAME, + "volumes", DateraObject.DEFAULT_VOLUME_NAME, + "snapshots", snapshotTime)); String resp = executeApiRequest(conn, getSnasphotReq); DateraObject.VolumeSnapshot snapshot = gson.fromJson(resp, DateraObject.VolumeSnapshot.class); String snapshotPath = snapshot.getPath(); + DateraObject.VolumeSnapshot cloneSnapshot = new DateraObject.VolumeSnapshot(snapshotPath); - DateraObject.AppInstance appInstanceObj = new DateraObject.AppInstance(newAppInstanceName, snapshotPath); + DateraObject.AppInstance appInstanceObj = new DateraObject.AppInstance(newAppInstanceName, desc, cloneSnapshot); StringEntity appInstanceEntity = new StringEntity(gson.toJson(appInstanceObj)); DateraObject.AppInstance appInstance = createAppInstance(conn, newAppInstanceName, appInstanceEntity); - // Update ipPool - updateAppInstanceIpPool(conn, newAppInstanceName, ipPool); - - // bring it online + //bring it online updateAppInstanceAdminState(conn, newAppInstanceName, DateraObject.AppState.ONLINE); return getAppInstance(conn, newAppInstanceName); } - public static void deleteVolumeSnapshot(DateraObject.DateraConnection conn, String snapshotName) - throws DateraObject.DateraError { - - // split the snapshot name to appInstanceName and the snapshot timestamp - String[] tokens = snapshotName.split(":"); - Preconditions.checkArgument(tokens.length == 2); + public static void deleteVolumeSnapshot(DateraObject.DateraConnection conn, String appInstanceName, String snapshotTime) throws DateraObject.DateraError { - // A snapshot is stored in Cloudstack as : - String appInstanceName = tokens[0]; - String snapshotTime = tokens[1]; - - HttpDelete deleteSnapshotReq = new HttpDelete( - generateApiUrl("app_instances", appInstanceName, "storage_instances", DateraObject.DEFAULT_STORAGE_NAME, - "volumes", DateraObject.DEFAULT_VOLUME_NAME, "snapshots", snapshotTime)); + HttpDelete deleteSnapshotReq = new HttpDelete(generateApiUrl("app_instances", appInstanceName, + "storage_instances", DateraObject.DEFAULT_STORAGE_NAME, + "volumes", DateraObject.DEFAULT_VOLUME_NAME, + "snapshots", snapshotTime)); executeApiRequest(conn, deleteSnapshotReq); } - public static DateraObject.VolumeSnapshot getVolumeSnapshot(DateraObject.DateraConnection conn, - String appInstanceName, String snapshotTime) throws DateraObject.DateraError { + public static DateraObject.VolumeSnapshot getVolumeSnapshot(DateraObject.DateraConnection conn, String appInstanceName, String snapshotTime) throws DateraObject.DateraError { - HttpGet getSnapshotReq = new HttpGet( - generateApiUrl("app_instances", appInstanceName, "storage_instances", DateraObject.DEFAULT_STORAGE_NAME, - "volumes", DateraObject.DEFAULT_VOLUME_NAME, "snapshots", snapshotTime)); + HttpGet getSnapshotReq = new HttpGet(generateApiUrl("app_instances", appInstanceName, + "storage_instances", DateraObject.DEFAULT_STORAGE_NAME, + "volumes", DateraObject.DEFAULT_VOLUME_NAME, + "snapshots", snapshotTime)); String resp = executeApiRequest(conn, getSnapshotReq); return gson.fromJson(resp, DateraObject.VolumeSnapshot.class); } - public static DateraObject.VolumeSnapshot takeVolumeSnapshot(DateraObject.DateraConnection conn, - String baseAppInstanceName) throws UnsupportedEncodingException, DateraObject.DateraError { + public static DateraObject.VolumeSnapshot takeVolumeSnapshot(DateraObject.DateraConnection conn, String baseAppInstanceName) throws UnsupportedEncodingException, DateraObject.DateraError { - HttpPost takeSnasphotReq = new HttpPost( - generateApiUrl("app_instances", baseAppInstanceName, "storage_instances", - DateraObject.DEFAULT_STORAGE_NAME, "volumes", DateraObject.DEFAULT_VOLUME_NAME, "snapshots")); + HttpPost takeSnasphotReq = new HttpPost(generateApiUrl("app_instances", baseAppInstanceName, + "storage_instances", DateraObject.DEFAULT_STORAGE_NAME, + "volumes", DateraObject.DEFAULT_VOLUME_NAME, "snapshots")); - String snapshotUuid = UUID.randomUUID().toString(); - DateraObject.VolumeSnapshot volumeSnapshot = new DateraObject.VolumeSnapshot(snapshotUuid); - takeSnasphotReq.setEntity(new StringEntity(gson.toJson(volumeSnapshot))); + DateraObject.VolumeSnapshot volumeSnapshot; String snapshotResponse = executeApiRequest(conn, takeSnasphotReq); volumeSnapshot = gson.fromJson(snapshotResponse, DateraObject.VolumeSnapshot.class); String snapshotTime = volumeSnapshot.getTimestamp(); @@ -646,67 +620,28 @@ public static DateraObject.VolumeSnapshot takeVolumeSnapshot(DateraObject.Datera return null; } volumeSnapshot = getVolumeSnapshot(conn, baseAppInstanceName, snapshotTime); - } while ((!Objects.equals(volumeSnapshot.getOpState(), DateraUtil.STATE_AVAILABLE)) && --retries > 0); + } while ((!Objects.equals(volumeSnapshot.getOpState(), DateraUtil.STATE_AVAILABLE)) && --retries>0); return volumeSnapshot; } - public static DateraObject.AppInstance restoreVolumeSnapshot(DateraObject.DateraConnection conn, - String snapshotName) throws DateraObject.DateraError { - - // split the snapshot name to appInstanceName and the snapshot timestamp - String[] tokens = snapshotName.split(":"); - Preconditions.checkArgument(tokens.length == 2); - - // A snapshot is stored in Cloudstack as : - String appInstanceName = tokens[0]; - String snapshotTime = tokens[1]; - - HttpPut restoreSnapshotReq = new HttpPut(generateApiUrl("app_instances", appInstanceName, "storage_instances", - DateraObject.DEFAULT_STORAGE_NAME, "volumes", DateraObject.DEFAULT_VOLUME_NAME)); - - try { - // bring appInstance offline - updateAppInstanceAdminState(conn, appInstanceName, DateraObject.AppState.OFFLINE); - - DateraObject.VolumeSnapshotRestore volumeSnapshotRestore = new DateraObject.VolumeSnapshotRestore( - snapshotTime); - - StringEntity jsonParams = new StringEntity(gson.toJson(volumeSnapshotRestore)); - restoreSnapshotReq.setEntity(jsonParams); - executeApiRequest(conn, restoreSnapshotReq); - // bring appInstance online - updateAppInstanceAdminState(conn, appInstanceName, DateraObject.AppState.ONLINE); - - } catch (UnsupportedEncodingException e) { - throw new CloudRuntimeException("Failed to restore volume snapshot" + e.getMessage()); - } - return getAppInstance(conn, appInstanceName); - - } - - private static String executeApiRequest(DateraObject.DateraConnection conn, HttpRequest apiReq) - throws DateraObject.DateraError { + private static String executeApiRequest(DateraObject.DateraConnection conn, HttpRequest apiReq) throws DateraObject.DateraError { - // Get the token first - String authToken = null; - try { - authToken = login(conn); - } catch (UnsupportedEncodingException e) { - throw new CloudRuntimeException("Unable to login to Datera " + e.getMessage()); - } + //Get the token first + String authToken = conn.getToken(); - if (authToken == null) { + if (authToken == null){ throw new CloudRuntimeException("Unable to login to Datera: error getting auth token "); } apiReq.addHeader(HEADER_AUTH_TOKEN, authToken); - - return executeHttp(conn, apiReq); + String resp = executeHttp(conn, apiReq); + JsonParser jsonParser = new JsonParser(); + JsonObject responseObj = (JsonObject) jsonParser.parse(resp); + return responseObj.get("data").toString(); } - private static String executeHttp(DateraObject.DateraConnection conn, HttpRequest request) - throws DateraObject.DateraError { + private static String executeHttp(DateraObject.DateraConnection conn, HttpRequest request) throws DateraObject.DateraError { CloseableHttpClient httpclient = HttpClientBuilder.create().build(); String response = null; @@ -716,6 +651,10 @@ private static String executeHttp(DateraObject.DateraConnection conn, HttpReques try { + long startTime = System.currentTimeMillis(); + String uri = request.getRequestLine().getUri(); + String method = request.getRequestLine().getMethod(); + request.setHeader(HEADER_CONTENT_TYPE, HEADER_VALUE_JSON); HttpHost target = new HttpHost(conn.getManagementIp(), conn.getManagementPort(), SCHEME_HTTP); @@ -739,6 +678,10 @@ private static String executeHttp(DateraObject.DateraConnection conn, HttpReques } + long endTime = System.currentTimeMillis(); + String mesg = String.format("[Datera] %s %s took %d ms", method, uri, endTime - startTime); + s_logger.info(mesg); + } catch (IOException e) { throw new CloudRuntimeException("Error while sending request to Datera. Error " + e.getMessage()); } @@ -775,28 +718,9 @@ public static int getNumReplicas(String url) { try { String value = getValue(DateraUtil.NUM_REPLICAS, url, false); return Integer.parseInt(value); - } catch (NumberFormatException ex) { + }catch (NumberFormatException ex){ return DEFAULT_NUM_REPLICAS; } - - } - - public static String getVolPlacement(String url) { - String volPlacement = getValue(DateraUtil.VOL_PLACEMENT, url, false); - if (volPlacement == null) { - return DEFAULT_VOL_PLACEMENT; - } else { - return volPlacement; - } - } - - public static String getIpPool(String url) { - String ipPool = getValue(DateraUtil.IP_POOL, url, false); - if (ipPool == null) { - return DEFAULT_IP_POOL; - } else { - return ipPool; - } } private static String getVip(String keyToMatch, String url) { @@ -878,8 +802,7 @@ public static String getModifiedUrl(String originalUrl) { while (st.hasMoreElements()) { String token = st.nextElement().toString().toUpperCase(); - if (token.startsWith(DateraUtil.MANAGEMENT_VIP.toUpperCase()) - || token.startsWith(DateraUtil.STORAGE_VIP.toUpperCase())) { + if (token.startsWith(DateraUtil.MANAGEMENT_VIP.toUpperCase()) || token.startsWith(DateraUtil.STORAGE_VIP.toUpperCase())) { sb.append(token).append(delimiter); } } @@ -894,10 +817,8 @@ public static String getModifiedUrl(String originalUrl) { return modifiedUrl; } - public static DateraObject.DateraConnection getDateraConnection(long storagePoolId, - StoragePoolDetailsDao storagePoolDetailsDao) { - StoragePoolDetailVO storagePoolDetail = storagePoolDetailsDao.findDetail(storagePoolId, - DateraUtil.MANAGEMENT_VIP); + public static DateraObject.DateraConnection getDateraConnection(long storagePoolId, StoragePoolDetailsDao storagePoolDetailsDao) { + StoragePoolDetailVO storagePoolDetail = storagePoolDetailsDao.findDetail(storagePoolId, DateraUtil.MANAGEMENT_VIP); String mVip = storagePoolDetail.getValue(); @@ -913,7 +834,13 @@ public static DateraObject.DateraConnection getDateraConnection(long storagePool String clusterAdminPassword = storagePoolDetail.getValue(); - return new DateraObject.DateraConnection(mVip, mPort, clusterAdminUsername, clusterAdminPassword); + try { + return new DateraObject.DateraConnection(mVip, mPort, clusterAdminUsername, clusterAdminPassword) ; + } catch (DateraObject.DateraError | UnsupportedEncodingException dateraError) { + String errMesg = "Unable to connect to Datera"; + s_logger.error(errMesg, dateraError); + throw new CloudRuntimeException(errMesg, dateraError); + } } public static boolean hostsSupport_iScsi(List hosts) { @@ -922,7 +849,7 @@ public static boolean hostsSupport_iScsi(List hosts) { } for (Host host : hosts) { - if (!hostSupport_iScsi(host)) { + if (!hostSupport_iScsi(host)){ return false; } } @@ -931,8 +858,7 @@ public static boolean hostsSupport_iScsi(List hosts) { } public static boolean hostSupport_iScsi(Host host) { - if (host == null || host.getStorageUrl() == null || host.getStorageUrl().trim().length() == 0 - || !host.getStorageUrl().startsWith("iqn")) { + if (host == null || host.getStorageUrl() == null || host.getStorageUrl().trim().length() == 0 || !host.getStorageUrl().startsWith("iqn")) { return false; } return true; @@ -945,15 +871,14 @@ public static String getInitiatorGroupKey(long storagePoolId) { /** * Checks wether a host initiator is present in an initiator group * - * @param initiator Host initiator to check + * @param initiator Host initiator to check * @param initiatorGroup the initiator group * @return true if host initiator is in the group, false otherwise */ - public static boolean isInitiatorPresentInGroup(DateraObject.Initiator initiator, - DateraObject.InitiatorGroup initiatorGroup) { + public static boolean isInitiatorPresentInGroup(DateraObject.Initiator initiator, DateraObject.InitiatorGroup initiatorGroup) { - for (String memberPath : initiatorGroup.getMembers()) { - if (memberPath.equals(initiator.getPath())) { + for (DateraObject.Initiator member : initiatorGroup.getMembers() ) { + if (member.getPath().equals(initiator.getPath())) { return true; } } @@ -961,12 +886,12 @@ public static boolean isInitiatorPresentInGroup(DateraObject.Initiator initiator return false; } - public static int bytesToGib(long volumeSizeBytes) { - return (int) Math.ceil(volumeSizeBytes / (double) ONEGIB_BYTES); + public static int bytesToGb(long volumeSizeBytes) { + return (int) Math.ceil(volumeSizeBytes/1073741824.0); } - public static long gibToBytes(int volumeSizeGb) { - return volumeSizeGb * ONEGIB_BYTES; + public static long gbToBytes(int volumeSizeGb) { + return volumeSizeGb*1024*1024; } /** @@ -1009,20 +934,5 @@ public static String extractIqn(String iqnPath) { return tokens[1].trim(); } - /** - * Generate random uuid - * - * @param seed - * @param length ( default to 8 ) - * @return String uuid - */ - public static String generateUUID(String seed) { - int length = UUID_LENGTH; - // creating UUID - UUID uid = UUID.fromString(seed); - String uuid = String.valueOf(uid.randomUUID()).substring(0, length); - - return uuid; - } } diff --git a/plugins/storage/volume/datera/src/main/resources/META-INF/cloudstack/storage-volume-datera/spring-storage-volume-datera-context.xml b/plugins/storage/volume/datera/src/main/resources/META-INF/cloudstack/storage-volume-datera/spring-storage-volume-datera-context.xml index 84ca8fd4141a..bdf613ff41fb 100644 --- a/plugins/storage/volume/datera/src/main/resources/META-INF/cloudstack/storage-volume-datera/spring-storage-volume-datera-context.xml +++ b/plugins/storage/volume/datera/src/main/resources/META-INF/cloudstack/storage-volume-datera/spring-storage-volume-datera-context.xml @@ -21,10 +21,10 @@ xmlns:context="http://www.springframework.org/schema/context" xmlns:aop="http://www.springframework.org/schema/aop" xsi:schemaLocation="http://www.springframework.org/schema/beans - http://www.springframework.org/schema/beans/spring-beans.xsd - http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd + http://www.springframework.org/schema/beans/spring-beans-3.0.xsd + http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop-3.0.xsd http://www.springframework.org/schema/context - http://www.springframework.org/schema/context/spring-context.xsd" + http://www.springframework.org/schema/context/spring-context-3.0.xsd" > getStoragePaths(long clusterId, long storagePoolId) { if (instanceId != null) { VMInstanceVO vmInstance = vmDao.findById(instanceId); - Long hostIdForVm = vmInstance.getHostId() != null ? vmInstance.getHostId() : vmInstance.getLastHostId(); - - if (hostIdForVm != null) { - HostVO hostForVm = hostDao.findById(hostIdForVm); - - if (hostForVm != null && hostForVm.getClusterId().equals(clusterId)) { - storagePaths.add(volume.get_iScsiName()); + Long hostIdForVm = null; + if(vmInstance != null) { + hostIdForVm = vmInstance.getHostId() != null ? vmInstance.getHostId() : vmInstance.getLastHostId(); + if (hostIdForVm != null) { + HostVO hostForVm = hostDao.findById(hostIdForVm); + + if (hostForVm != null && hostForVm.getClusterId().equals(clusterId)) { + storagePaths.add(volume.get_iScsiName()); + } } + } + LOGGER.debug("Adding storage path with instance id " + instanceId + "for vm instance host id " + hostIdForVm); } } } diff --git a/pom.xml b/pom.xml index 87ca50537d4e..69f94a8e8056 100644 --- a/pom.xml +++ b/pom.xml @@ -296,6 +296,11 @@ ${cs.jackson.version} bundle + + com.bettercloud + vault-java-driver + 3.1.0 + com.globo.globodns globodns-client diff --git a/scripts/util/ipmi.py b/scripts/util/ipmi.py index 2b5c36c06f96..43567f9df88a 100755 --- a/scripts/util/ipmi.py +++ b/scripts/util/ipmi.py @@ -22,7 +22,8 @@ import sys, os, subprocess, errno, re from os.path import exists -TOOl_PATH = "/usr/bin/ipmitool" +TOOL_PATH = "/usr/bin/ipmitool" +IPMI_INTERFACE = "lanplus" # IPMI v2.0 try: from subprocess import check_call @@ -79,7 +80,7 @@ def __repr__(self): return self.__str__() ipmitool = Command("ipmitool") def check_tool(): - if exists(TOOl_PATH) == False: + if exists(TOOL_PATH) == False: print "Can not find ipmitool" return False @@ -92,7 +93,7 @@ def ping(args): print "No hostname" return 1 - o = ipmitool("-H", hostname, "-U", usrname, "-P", password, "chassis", "power", "status") + o = ipmitool("-I", IPMI_INTERFACE, "-H", hostname, "-U", usrname, "-P", password, "chassis", "power", "status") if o.ret: print o.stderr return 1 @@ -114,7 +115,7 @@ def boot_dev(args): print "No boot device specified" return 1 - o = ipmitool("-H", hostname, "-U", usrname, "-P", password, "chassis", "bootdev", dev) + o = ipmitool("-I", IPMI_INTERFACE, "-H", hostname, "-U", usrname, "-P", password, "chassis", "bootdev", dev) if o.ret: print o.stderr return 1 @@ -130,14 +131,14 @@ def reboot(args): print "No hostname" return 1 - o = ipmitool("-H", hostname, "-U", usrname, "-P", password, "chassis", "power", "status") + o = ipmitool("-I", IPMI_INTERFACE, "-H", hostname, "-U", usrname, "-P", password, "chassis", "power", "status") if o.ret: print o.stderr return 1 if "is on" in o.stdout: - o = ipmitool("-H", hostname, "-U", usrname, "-P", password, "chassis", "power", "cycle") + o = ipmitool("-I", IPMI_INTERFACE, "-H", hostname, "-U", usrname, "-P", password, "chassis", "power", "cycle") else: o = ipmitool("-H", hostname, "-U", usrname, "-P", password, "chassis", "power", "reset") @@ -157,7 +158,7 @@ def power(args): print "No hostname" return 1 - o = ipmitool("-H", hostname, "-U", usrname, "-P", password, "chassis", "power", action) + o = ipmitool("-I", IPMI_INTERFACE, "-H", hostname, "-U", usrname, "-P", password, "chassis", "power", action) if o.ret: print o.stderr return 1 @@ -168,7 +169,7 @@ def boot_or_reboot(args): hostname = args.get("hostname") usrname = args.get("usrname") password = args.get("password") - o = ipmitool("-H", hostname, "-U", usrname, "-P", password, "chassis", "power", "status") + o = ipmitool("-I", IPMI_INTERFACE, "-H", hostname, "-U", usrname, "-P", password, "chassis", "power", "status") if o.ret: print o.stderr return 1 diff --git a/scripts/vm/hypervisor/xenserver/logrotate b/scripts/vm/hypervisor/xenserver/logrotate index 375b7e700e3c..ab4d694ab2fe 100644 --- a/scripts/vm/hypervisor/xenserver/logrotate +++ b/scripts/vm/hypervisor/xenserver/logrotate @@ -20,7 +20,7 @@ # # script to perform logrotation on xenserver 6.0.2 and later -/usr/sbin/logrotate /etc/logrotate.d/cloudlog +/usr/sbin/logrotate /etc/logrotate.conf EXITVALUE=$? if [ $EXITVALUE != 0 ]; then /usr/bin/logger -t logrotate "ALERT exited abnormally with [$EXITVALUE]" diff --git a/server/src/main/java/com/cloud/api/ApiResponseHelper.java b/server/src/main/java/com/cloud/api/ApiResponseHelper.java index 411d0e1c0a35..b6bea04844cf 100644 --- a/server/src/main/java/com/cloud/api/ApiResponseHelper.java +++ b/server/src/main/java/com/cloud/api/ApiResponseHelper.java @@ -1688,10 +1688,12 @@ public RemoteAccessVpnResponse createRemoteAccessVpnResponse(RemoteAccessVpn vpn } vpnResponse.setIpRange(vpn.getIpRange()); vpnResponse.setPresharedKey(vpn.getIpsecPresharedKey()); + vpnResponse.setCertificate(vpn.getCaCertificate()); populateOwner(vpnResponse, vpn); vpnResponse.setState(vpn.getState().toString()); vpnResponse.setId(vpn.getUuid()); vpnResponse.setForDisplay(vpn.isDisplay()); + vpnResponse.setType(vpn.getVpnType()); vpnResponse.setObjectName("remoteaccessvpn"); return vpnResponse; @@ -3182,6 +3184,7 @@ public VpcResponse createVpcResponse(ResponseView view, Vpc vpc) { response.setUsesDistributedRouter(vpc.usesDistributedRouter()); response.setRedundantRouter(vpc.isRedundant()); response.setRegionLevelVpc(vpc.isRegionLevelVpc()); + response.setNetworkBootIp(vpc.getNetworkBootIp()); Map> serviceProviderMap = ApiDBUtils.listVpcOffServices(vpc.getVpcOfferingId()); List serviceResponses = new ArrayList(); diff --git a/server/src/main/java/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java index 6b10afbdb1fd..ac14052206c7 100644 --- a/server/src/main/java/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java @@ -101,6 +101,10 @@ public DiskOfferingResponse newDiskOfferingResponse(DiskOfferingJoinVO offering) diskOfferingResponse.setDiskSize(offering.getDiskSize() / (1024 * 1024 * 1024)); diskOfferingResponse.setMinIops(offering.getMinIops()); diskOfferingResponse.setMaxIops(offering.getMaxIops()); + diskOfferingResponse.setMinIopsPerGb(offering.getMinIopsPerGb()); + diskOfferingResponse.setMaxIopsPerGb(offering.getMaxIopsPerGb()); + diskOfferingResponse.setHighestMinIops(offering.getHighestMinIops()); + diskOfferingResponse.setHighestMaxIops(offering.getHighestMaxIops()); diskOfferingResponse.setDisplayOffering(offering.isDisplayOffering()); diskOfferingResponse.setDomainId(offering.getDomainUuid()); diff --git a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java index 3a71b3e2b3d2..d56ac97d889d 100644 --- a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java @@ -188,9 +188,14 @@ public TemplateResponse newTemplateResponse(EnumSet templateResponse.setDisplayText(template.getDisplayText()); templateResponse.setPublic(template.isPublicTemplate()); templateResponse.setCreated(template.getCreatedOnStore()); + if (template.getFormat() == Storage.ImageFormat.BAREMETAL) { // for baremetal template, we didn't download, but is ready to use. templateResponse.setReady(true); + } else if (template.getFormat() == Storage.ImageFormat.PXEBOOT) { + // For PXEBOOT we don't need to download, but it's ready to use. + templateResponse.setReady(true); + templateResponse.setBootFilename(template.getBootFilename()); } else { templateResponse.setReady(template.getState() == ObjectInDataStoreStateMachine.State.Ready); } diff --git a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java index 978d5c0b19c0..e09754eb5367 100644 --- a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java @@ -28,9 +28,6 @@ import javax.inject.Inject; -import com.cloud.network.vpc.VpcVO; -import com.cloud.network.vpc.dao.VpcDao; -import com.cloud.storage.DiskOfferingVO; import org.apache.cloudstack.affinity.AffinityGroupResponse; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; @@ -52,7 +49,10 @@ import com.cloud.api.ApiResponseHelper; import com.cloud.api.query.vo.UserVmJoinVO; import com.cloud.gpu.GPU; +import com.cloud.network.vpc.VpcVO; +import com.cloud.network.vpc.dao.VpcDao; import com.cloud.service.ServiceOfferingDetailsVO; +import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.GuestOS; import com.cloud.user.Account; import com.cloud.user.AccountManager; diff --git a/server/src/main/java/com/cloud/api/query/vo/DiskOfferingJoinVO.java b/server/src/main/java/com/cloud/api/query/vo/DiskOfferingJoinVO.java index 2013c37161dd..d8963fe4c163 100644 --- a/server/src/main/java/com/cloud/api/query/vo/DiskOfferingJoinVO.java +++ b/server/src/main/java/com/cloud/api/query/vo/DiskOfferingJoinVO.java @@ -161,6 +161,18 @@ public class DiskOfferingJoinVO extends BaseViewVO implements InternalIdentity, @Column(name = "disk_size_strictness") boolean diskSizeStrictness; + @Column(name = "min_iops_per_gb") + Long minIopsPerGb; + + @Column(name = "max_iops_per_gb") + Long maxIopsPerGb; + + @Column(name = "highest_min_iops") + Long highestMinIops; + + @Column(name = "highest_max_iops") + Long highestMaxIops; + public DiskOfferingJoinVO() { } @@ -349,4 +361,21 @@ public String getVsphereStoragePolicy() { public boolean getDiskSizeStrictness() { return diskSizeStrictness; } + + public Long getMinIopsPerGb() { + return minIopsPerGb; + } + + public Long getMaxIopsPerGb() { + return maxIopsPerGb; + } + + public Long getHighestMinIops() { + return highestMinIops; + } + + public Long getHighestMaxIops() { + return highestMaxIops; + + } } diff --git a/server/src/main/java/com/cloud/api/query/vo/TemplateJoinVO.java b/server/src/main/java/com/cloud/api/query/vo/TemplateJoinVO.java index acad03e99bcb..864014c07b7c 100644 --- a/server/src/main/java/com/cloud/api/query/vo/TemplateJoinVO.java +++ b/server/src/main/java/com/cloud/api/query/vo/TemplateJoinVO.java @@ -68,6 +68,9 @@ public class TemplateJoinVO extends BaseViewWithTagInformationVO implements Cont @Column(name = "hvm") private boolean requiresHvm; + @Column(name = "boot_filename") + private String bootFilename; + @Column(name = "bits") private int bits; @@ -350,6 +353,8 @@ public boolean isRequiresHvm() { return requiresHvm; } + public String getBootFilename() { return bootFilename; } + public int getBits() { return bits; } diff --git a/server/src/main/java/com/cloud/configuration/Config.java b/server/src/main/java/com/cloud/configuration/Config.java index 67fe9ad19a89..2a7795d0f244 100644 --- a/server/src/main/java/com/cloud/configuration/Config.java +++ b/server/src/main/java/com/cloud/configuration/Config.java @@ -1706,6 +1706,20 @@ public enum Config { "5", "ipmi interface will be temporary out of order after power opertions(e.g. cycle, on), it leads following commands fail immediately. The value specifies retry times before accounting it as real failure", null), + BaremetalIpmiRetryDelay("Advanced", + ManagementServer.class, + String.class, + "baremetal.ipmi.fail.retry.delay", + "1", + "The value specifies retry time delay in seconds between each retry", + null), + BaremetalIpmiTimeout("Advanced", + ManagementServer.class, + String.class, + "baremetal.ipmi.fail.timeout", + "0", + "The value specifies the time to wait when doing an ipmi call in milliseconds.", + null), ApiLimitEnabled("Advanced", ManagementServer.class, Boolean.class, "api.throttling.enabled", "false", "Enable/disable Api rate limit", null), ApiLimitInterval("Advanced", ManagementServer.class, Integer.class, "api.throttling.interval", "1", "Time interval (in seconds) to reset API count", null), @@ -1784,7 +1798,9 @@ public enum Config { // StatsCollector StatsOutPutGraphiteHost("Advanced", ManagementServer.class, String.class, "stats.output.uri", "", "URI to additionally send StatsCollector statistics to", null), - SSVMPSK("Hidden", ManagementServer.class, String.class, "upload.post.secret.key", "", "PSK with SSVM", null); + SSVMPSK("Hidden", ManagementServer.class, String.class, "upload.post.secret.key", "", "PSK with SSVM", null), + + VPCUsageWhiteListCIDR("Network", ManagementServer.class, String.class, "vpc.usage.whitelist.cidr", null, "List of CIDRs to track usage separately in VPCs", "routes"); private final String _category; private final Class _componentClass; diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index ff45b73509a9..3084c2291ce5 100755 --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -3431,14 +3431,17 @@ public List getServiceOfferingZones(Long serviceOfferingId) { return _serviceOfferingDetailsDao.findZoneIds(serviceOfferingId); } - protected DiskOfferingVO createDiskOffering(final Long userId, final List domainIds, final List zoneIds, final String name, final String description, final String provisioningType, - final Long numGibibytes, String tags, boolean isCustomized, final boolean localStorageRequired, - final boolean isDisplayOfferingEnabled, final Boolean isCustomizedIops, Long minIops, Long maxIops, - Long bytesReadRate, Long bytesReadRateMax, Long bytesReadRateMaxLength, - Long bytesWriteRate, Long bytesWriteRateMax, Long bytesWriteRateMaxLength, - Long iopsReadRate, Long iopsReadRateMax, Long iopsReadRateMaxLength, - Long iopsWriteRate, Long iopsWriteRateMax, Long iopsWriteRateMaxLength, - final Integer hypervisorSnapshotReserve, String cacheMode, final Map details, final Long storagePolicyID, final boolean diskSizeStrictness) { + protected DiskOfferingVO createDiskOffering(final Long userId, final List domainIds, final List zoneIds, + final String name, final String description, final String provisioningType, + final Long numGibibytes, String tags, boolean isCustomized, final boolean localStorageRequired, + final boolean isDisplayOfferingEnabled, final Boolean isCustomizedIops, Long minIops, Long maxIops, + Long bytesReadRate, Long bytesReadRateMax, Long bytesReadRateMaxLength, + Long bytesWriteRate, Long bytesWriteRateMax, Long bytesWriteRateMaxLength, + Long iopsReadRate, Long iopsReadRateMax, Long iopsReadRateMaxLength, + Long iopsWriteRate, Long iopsWriteRateMax, Long iopsWriteRateMaxLength, + final Integer hypervisorSnapshotReserve, String cacheMode, final Map details, + final Long storagePolicyID, final boolean diskSizeStrictness, + Long minIopsPerGb, Long maxIopsPerGb, Long highestMinIops, Long highestMaxIops) { long diskSize = 0;// special case for custom disk offerings long maxVolumeSizeInGb = VolumeOrchestrationService.MaxVolumeSize.value(); if (numGibibytes != null && numGibibytes <= 0) { @@ -3456,6 +3459,61 @@ protected DiskOfferingVO createDiskOffering(final Long userId, final List isCustomized = true; } + if (minIopsPerGb != null || maxIopsPerGb != null) { + + if (!isCustomized) { + throw new InvalidParameterValueException("Cannot set Min/Max IOPS/GB for a fixed size disk offering"); + } + + if ((isCustomizedIops != null && isCustomizedIops) || minIops != null || maxIops != null) { + throw new InvalidParameterValueException("Cannot set Min/Max IOPS/GB with either " + + "custom IOPS or fixed IOPS"); + } + + if (minIopsPerGb != null && maxIopsPerGb != null) { + if (minIopsPerGb <= 0 || maxIopsPerGb <= 0) { + throw new InvalidParameterValueException("Min/Max IOPS/GB value must be greater than 0"); + } + + if (minIopsPerGb > maxIopsPerGb){ + throw new InvalidParameterValueException("Min IOPS/GB must be greater than max IOPS/GB"); + } + } + + //if either one of them is set but the other is not + if ((minIopsPerGb != null && maxIopsPerGb == null) || (minIopsPerGb == null && maxIopsPerGb != null)) { + throw new InvalidParameterValueException("Both min IOPS/GB and max IOPS/GB must be specified"); + } + } + + if (highestMinIops != null && highestMaxIops != null) { + if (highestMinIops > highestMaxIops){ + throw new InvalidParameterValueException("highestminiops must be less than highestmaxiops"); + } + if (highestMinIops <= 0 || highestMaxIops <= 0) { + throw new InvalidParameterValueException("highestminiops/highestmaxiops value must be greater than 0"); + } + + + if (minIopsPerGb == null && (isCustomizedIops == null || !isCustomizedIops)) { + throw new InvalidParameterValueException("highestminops specified but none of customizediops or miniopspergb specified"); + } + if (minIops != null) { + throw new InvalidParameterValueException("highestminiops cannot be specified with fixed miniops"); + } + + if (maxIopsPerGb == null && (isCustomizedIops == null || !isCustomizedIops)) { + throw new InvalidParameterValueException("highestmaxiops specified but none of customizediops or maxiopspergb specified"); + } + if (maxIops != null) { + throw new InvalidParameterValueException("highestmaxiops cannot be specified with fixed maxiops"); + } + }else { + if (highestMaxIops != null || highestMinIops != null) { + throw new InvalidParameterValueException("Both highestminiops and highestmaxiops should be specified"); + } + } + if (Boolean.TRUE.equals(isCustomizedIops) || isCustomizedIops == null) { minIops = null; maxIops = null; @@ -3478,14 +3536,14 @@ protected DiskOfferingVO createDiskOffering(final Long userId, final List } } - // Filter child domains when both parent and child domains are present - List filteredDomainIds = filterChildSubDomains(domainIds); - // Check if user exists in the system final User user = _userDao.findById(userId); if (user == null || user.getRemoved() != null) { throw new InvalidParameterValueException("Unable to find active user by id " + userId); } + + // Filter child domains when both parent and child domains are present + List filteredDomainIds = filterChildSubDomains(domainIds); final Account account = _accountDao.findById(user.getAccountId()); if (account.getType() == Account.Type.DOMAIN_ADMIN) { if (filteredDomainIds.isEmpty()) { @@ -3516,6 +3574,20 @@ protected DiskOfferingVO createDiskOffering(final Long userId, final List newDiskOffering.setCacheMode(DiskOffering.DiskCacheMode.valueOf(cacheMode.toUpperCase())); } + if (highestMinIops != null && highestMinIops > 0) { + newDiskOffering.setHighestMinIops(highestMinIops); + } + if (highestMaxIops != null && highestMaxIops > 0) { + newDiskOffering.setHighestMaxIops(highestMaxIops); + } + + if (minIopsPerGb != null && minIopsPerGb > 0) { + newDiskOffering.setMinIopsPerGb(minIopsPerGb); + } + if (maxIopsPerGb != null && maxIopsPerGb > 0) { + newDiskOffering.setMaxIopsPerGb(maxIopsPerGb); + } + if (hypervisorSnapshotReserve != null && hypervisorSnapshotReserve < 0) { throw new InvalidParameterValueException("If provided, Hypervisor Snapshot Reserve must be greater than or equal to 0."); } @@ -3630,6 +3702,7 @@ public DiskOffering createDiskOffering(final CreateDiskOfferingCmd cmd) { final Long iopsWriteRateMax = cmd.getIopsWriteRateMax(); final Long iopsWriteRateMaxLength = cmd.getIopsWriteRateMaxLength(); final Integer hypervisorSnapshotReserve = cmd.getHypervisorSnapshotReserve(); + final String cacheMode = cmd.getCacheMode(); validateMaxRateEqualsOrGreater(iopsReadRate, iopsReadRateMax, IOPS_READ_RATE); @@ -3639,12 +3712,18 @@ public DiskOffering createDiskOffering(final CreateDiskOfferingCmd cmd) { validateMaximumIopsAndBytesLength(iopsReadRateMaxLength, iopsWriteRateMaxLength, bytesReadRateMaxLength, bytesWriteRateMaxLength); + final Long minIopsPerGb = cmd.getMinIopsPerGb(); + final Long maxIopsPerGb = cmd.getMaxIopsPerGb(); + final Long highestMinIops = cmd.getHighestMinIops(); + final Long highestMaxIops = cmd.getHighestMaxIops(); + final Long userId = CallContext.current().getCallingUserId(); return createDiskOffering(userId, domainIds, zoneIds, name, description, provisioningType, numGibibytes, tags, isCustomized, localStorageRequired, isDisplayOfferingEnabled, isCustomizedIops, minIops, maxIops, bytesReadRate, bytesReadRateMax, bytesReadRateMaxLength, bytesWriteRate, bytesWriteRateMax, bytesWriteRateMaxLength, iopsReadRate, iopsReadRateMax, iopsReadRateMaxLength, iopsWriteRate, iopsWriteRateMax, iopsWriteRateMaxLength, - hypervisorSnapshotReserve, cacheMode, details, storagePolicyId, diskSizeStrictness); + hypervisorSnapshotReserve, cacheMode, details, storagePolicyId, diskSizeStrictness, minIopsPerGb, + maxIopsPerGb, highestMinIops, highestMaxIops); } /** diff --git a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java index 20b7a2b67d6b..7e08c6d163ef 100644 --- a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java +++ b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java @@ -283,6 +283,7 @@ protected VirtualMachineTO toVirtualMachineTO(VirtualMachineProfile vmProfile) { to.setConfigDriveLocation(vmProfile.getConfigDriveLocation()); to.setState(vm.getState()); + to.setFormat(vmProfile.getTemplate().getFormat()); return to; } diff --git a/server/src/main/java/com/cloud/network/ExternalFirewallDeviceManagerImpl.java b/server/src/main/java/com/cloud/network/ExternalFirewallDeviceManagerImpl.java index 21eae27ea667..6791c2502179 100644 --- a/server/src/main/java/com/cloud/network/ExternalFirewallDeviceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/ExternalFirewallDeviceManagerImpl.java @@ -89,6 +89,7 @@ import com.cloud.network.rules.PortForwardingRule; import com.cloud.network.rules.StaticNat; import com.cloud.network.rules.dao.PortForwardingRulesDao; +import com.cloud.network.vpn.RemoteAccessVpnService; import com.cloud.offering.NetworkOffering; import com.cloud.offerings.NetworkOfferingVO; import com.cloud.offerings.dao.NetworkOfferingDao; @@ -705,8 +706,22 @@ public boolean manageRemoteAccessVpn(boolean create, Network network, RemoteAcce String maskedIpRange = ipRange[0] + "-" + ipRange[1]; - RemoteAccessVpnCfgCommand createVpnCmd = - new RemoteAccessVpnCfgCommand(create, ip.getAddress().addr(), vpn.getLocalIp(), maskedIpRange, vpn.getIpsecPresharedKey(), false); + final String vpnType = null; + final String caCert = null; + final String serverCert = null; + final String serverKey = null; + + RemoteAccessVpnCfgCommand createVpnCmd = new RemoteAccessVpnCfgCommand( + create, + ip.getAddress().addr(), + vpn.getLocalIp(), + maskedIpRange, + vpn.getIpsecPresharedKey(), + false, + vpnType, + caCert, + serverCert, + serverKey); createVpnCmd.setAccessDetail(NetworkElementCommand.ACCOUNT_ID, String.valueOf(network.getAccountId())); createVpnCmd.setAccessDetail(NetworkElementCommand.GUEST_NETWORK_CIDR, network.getCidr()); Answer answer = _agentMgr.easySend(externalFirewall.getId(), createVpnCmd); @@ -740,7 +755,9 @@ public boolean manageRemoteAccessVpnUsers(Network network, RemoteAccessVpn vpn, } } - VpnUsersCfgCommand addUsersCmd = new VpnUsersCfgCommand(addUsers, removeUsers); + String vpnType = _configDao.getValue(RemoteAccessVpnService.RemoteAccessVpnTypeConfigKey); + + VpnUsersCfgCommand addUsersCmd = new VpnUsersCfgCommand(addUsers, removeUsers, vpnType); addUsersCmd.setAccessDetail(NetworkElementCommand.ACCOUNT_ID, String.valueOf(network.getAccountId())); addUsersCmd.setAccessDetail(NetworkElementCommand.GUEST_NETWORK_CIDR, network.getCidr()); diff --git a/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java b/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java index 7ec4d0fd37ef..18ee02d421e5 100644 --- a/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java +++ b/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java @@ -954,6 +954,29 @@ public List doInTransaction(TransactionStatus status) throws Insuff if (lockOneRow) { assert (addrs.size() == 1) : "Return size is incorrect: " + addrs.size(); +//======= +// assert(addrs.size() == 1) : "Return size is incorrect: " + addrs.size(); +// +// if (!fetchFromDedicatedRange && VlanType.VirtualNetwork.equals(vlanUse)) { +// // Check that the maximum number of public IPs for the given accountId will not be exceeded +// try { +// _resourceLimitMgr.checkResourceLimit(owner, ResourceType.public_ip); +// } catch (ResourceAllocationException ex) { +// s_logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + owner); +// throw new AccountLimitException("Maximum number of public IP addresses for account: " + owner.getAccountName() + " has been exceeded."); +// } +// } +// +// IPAddressVO finalAddr = getIpAddressVO(addrs, sourceNat, owner, isSystem, displayIp, vlanUse, guestNetworkId, vpcId, ignoreIp); +// +// if(finalAddr == null && ignoreIp != null) { +// finalAddr = getIpAddressVO(addrs, sourceNat, owner, isSystem, displayIp, vlanUse, guestNetworkId, vpcId, null); +// } +// +// if (finalAddr == null) { +// s_logger.error("Failed to fetch any free public IP address"); +// throw new CloudRuntimeException("Failed to fetch any free public IP address"); +//>>>>>>> ak-ht-rebase-4.13 } if (assign) { assignAndAllocateIpAddressEntry(owner, vlanUse, guestNetworkId, sourceNat, allocate, @@ -964,6 +987,46 @@ public List doInTransaction(TransactionStatus status) throws Insuff }); } + private IPAddressVO getIpAddressVO(List addrs, boolean sourceNat, Account owner, boolean isSystem, Boolean displayIp, VlanType vlanUse, Long guestNetworkId, Long vpcId, String ignoreIp) { + IPAddressVO finalAddr = null; + for (final IPAddressVO possibleAddr: addrs) { + if(ignoreIp != null && ignoreIp.equals(possibleAddr.getAddress().addr())) { + continue; + } + + if (possibleAddr.getState() != State.Free) { + continue; + } + + final IPAddressVO addr = possibleAddr; + addr.setSourceNat(sourceNat); + addr.setAllocatedTime(new Date()); + addr.setAllocatedInDomainId(owner.getDomainId()); + addr.setAllocatedToAccountId(owner.getId()); + addr.setSystem(isSystem); + + if (displayIp != null) { + addr.setDisplay(displayIp); + } + + if (vlanUse != VlanType.DirectAttached) { + addr.setAssociatedWithNetworkId(guestNetworkId); + addr.setVpcId(vpcId); + } + if (_ipAddressDao.lockRow(possibleAddr.getId(), true) != null) { + final IPAddressVO userIp = _ipAddressDao.findById(addr.getId()); + if (userIp.getState() == State.Free) { + addr.setState(State.Allocating); + if (_ipAddressDao.update(addr.getId(), addr)) { + finalAddr = addr; + break; + } + } + } + } + return finalAddr; + } + @DB @Override public void markPublicIpAsAllocated(final IPAddressVO addr) { @@ -1032,7 +1095,13 @@ public PublicIp assignSourceNatIpAddressToGuestNetwork(Account owner, Network gu @Override public PublicIp assignDedicateIpAddress(Account owner, final Long guestNtwkId, final Long vpcId, final long dcId, final boolean isSourceNat) throws ConcurrentOperationException, InsufficientAddressCapacityException { + return assignDedicateIpAddress(owner, guestNtwkId, vpcId, dcId, isSourceNat, null); + } + @DB + @Override + public PublicIp assignDedicateIpAddress(Account owner, final Long guestNtwkId, final Long vpcId, final long dcId, final boolean isSourceNat, String ignoreIp) + throws ConcurrentOperationException, InsufficientAddressCapacityException { final long ownerId = owner.getId(); PublicIp ip = null; diff --git a/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java b/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java index 748ea5c68a67..e3004330e493 100644 --- a/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java @@ -298,7 +298,7 @@ public Long makeCopyOfVpc(long vpcId, long vpcOfferingId) { long copyOfVpcId; try { copyOfVpc = _vpcService.createVpc(vpc.getZoneId(), vpcOfferingId, vpc.getAccountId(), vpc.getName(), vpc.getDisplayText(), vpc.getCidr(), - vpc.getNetworkDomain(), vpc.isDisplay()); + vpc.getNetworkDomain(), vpc.isDisplay(), vpc.getNetworkBootIp()); copyOfVpcId = copyOfVpc.getId(); //on resume of migration the uuid will be swapped already. So the copy will have the value of the original vpcid. _resourceTagDao.persist(new ResourceTagVO(MIGRATION, Long.toString(vpcId), vpc.getAccountId(), vpc.getDomainId(), copyOfVpcId, ResourceTag.ResourceObjectType.Vpc, null, vpc.getUuid())); diff --git a/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java b/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java index 0900509aad5c..ff5481993dbb 100644 --- a/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java +++ b/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java @@ -533,7 +533,7 @@ private static Map> setCapabilities() { final Map lbCapabilities = new HashMap(); lbCapabilities.put(Capability.SupportedLBAlgorithms, "roundrobin,leastconn,source"); lbCapabilities.put(Capability.SupportedLBIsolation, "dedicated"); - lbCapabilities.put(Capability.SupportedProtocols, "tcp, udp, tcp-proxy"); + lbCapabilities.put(Capability.SupportedProtocols, "tcp, udp, tcp-proxy, tcp-proxy-v2"); lbCapabilities.put(Capability.SupportedStickinessMethods, getHAProxyStickinessCapability()); lbCapabilities.put(Capability.LbSchemes, LoadBalancerContainer.Scheme.Public.toString()); diff --git a/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java b/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java index 6839a6ae135b..be0eb94cdc03 100644 --- a/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java +++ b/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java @@ -29,6 +29,8 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.pki.PkiManager; +import org.apache.cloudstack.resourcedetail.dao.RemoteAccessVpnDetailsDao; import org.apache.log4j.Logger; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; @@ -95,6 +97,8 @@ import com.cloud.network.dao.NetworkDetailVO; import com.cloud.network.dao.NetworkDetailsDao; import com.cloud.network.dao.NetworkVO; +import com.cloud.network.dao.RemoteAccessVpnDao; +import com.cloud.network.dao.RemoteAccessVpnVO; import com.cloud.network.dao.Site2SiteCustomerGatewayDao; import com.cloud.network.dao.Site2SiteCustomerGatewayVO; import com.cloud.network.dao.Site2SiteVpnGatewayDao; @@ -120,6 +124,9 @@ import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.offerings.dao.NetworkOfferingDetailsDao; import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.storage.Storage; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.VMTemplateDao; import com.cloud.user.Account; import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; @@ -197,6 +204,12 @@ public class CommandSetupHelper { private NetworkDetailsDao networkDetailsDao; @Inject Ipv6Service ipv6Service; + @Inject + private RemoteAccessVpnDetailsDao remoteAccessVpnDetailsDao; + @Inject + private RemoteAccessVpnDao remoteAccessVpnDao; + @Inject + private VMTemplateDao _templateDao; @Autowired @Qualifier("networkHelper") @@ -229,7 +242,9 @@ public void createApplyVpnUsersCommand(final List users, fina } } - final VpnUsersCfgCommand cmd = new VpnUsersCfgCommand(addUsers, removeUsers); + RemoteAccessVpnVO vpnVO = remoteAccessVpnDao.findByAccountAndVpc(router.getAccountId(), router.getVpcId()); + + final VpnUsersCfgCommand cmd = new VpnUsersCfgCommand(addUsers, removeUsers, vpnVO.getVpnType()); cmd.setAccessDetail(NetworkElementCommand.ACCOUNT_ID, String.valueOf(router.getAccountId())); cmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, _routerControlHelper.getRouterControlIp(router.getId())); cmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, router.getInstanceName()); @@ -264,6 +279,18 @@ public void createDhcpEntryCommand(final VirtualRouter router, final UserVm vm, dhcpCommand.setAccessDetail(NetworkElementCommand.ROUTER_GUEST_IP, _routerControlHelper.getRouterIpInNetwork(nic.getNetworkId(), router.getId())); dhcpCommand.setAccessDetail(NetworkElementCommand.ZONE_NETWORK_TYPE, dcVo.getNetworkType().toString()); + VMTemplateVO template = _templateDao.findById(vm.getTemplateId()); + if(template != null && template.getFormat().equals(Storage.ImageFormat.PXEBOOT)) { + if (org.apache.commons.lang.StringUtils.isNotBlank(template.getBootFilename())) { + dhcpCommand.setBootFilename(template.getBootFilename()); + } + + final Vpc vpc = _entityMgr.findById(Vpc.class, router.getVpcId()); + if (org.apache.commons.lang.StringUtils.isNotBlank(vpc.getNetworkBootIp())) { + dhcpCommand.setNetworkBootIp(vpc.getNetworkBootIp()); + } + } + cmds.addCommand("dhcp", dhcpCommand); } @@ -689,8 +716,21 @@ public void createApplyVpnCommands(final boolean isCreate, final RemoteAccessVpn cidr = network.getCidr(); } - final RemoteAccessVpnCfgCommand startVpnCmd = new RemoteAccessVpnCfgCommand(isCreate, ip.getAddress().addr(), vpn.getLocalIp(), vpn.getIpRange(), - vpn.getIpsecPresharedKey(), vpn.getVpcId() != null); + // read additional details from DB and fill them up in RemoteAccessVpnVO + final Map vpnDetials = remoteAccessVpnDetailsDao.getDetails(vpn.getId()); + final String vpnType = vpn.getVpnType(); + + final RemoteAccessVpnCfgCommand startVpnCmd = new RemoteAccessVpnCfgCommand( + isCreate, + ip.getAddress().addr(), + vpn.getLocalIp(), + vpn.getIpRange(), + vpn.getIpsecPresharedKey(), + vpn.getVpcId() != null, + vpnType, + vpnDetials.get(PkiManager.CREDENTIAL_ISSUING_CA), + vpnDetials.get(PkiManager.CREDENTIAL_CERTIFICATE), + vpnDetials.get(PkiManager.CREDENTIAL_PRIVATE_KEY)); startVpnCmd.setLocalCidr(cidr); startVpnCmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, _routerControlHelper.getRouterControlIp(router.getId())); startVpnCmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, router.getInstanceName()); diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index 5ce43629087d..2a39ea5d42eb 100644 --- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -1274,7 +1274,7 @@ private boolean restartVpcInDomainRouter(DomainRouterJoinVO router, User user) { ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, Domain.ROOT_DOMAIN, EventTypes.EVENT_ROUTER_HEALTH_CHECKS, "Recreating router " + router.getUuid() + " by restarting VPC " + router.getVpcUuid(), router.getId(), ApiCommandResourceType.DomainRouter.toString()); - return vpcService.restartVpc(router.getVpcId(), true, false, false, user); + return vpcService.restartVpc(router.getVpcId(), true, false, false, user, false); } catch (Exception e) { s_logger.error("Failed to restart VPC for router recreation " + router.getVpcName() + " ,router " + router.getUuid(), e); diff --git a/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java index 1d1b9494d763..99b5b1d13c5e 100644 --- a/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java @@ -27,6 +27,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -77,6 +78,7 @@ import com.cloud.network.vpc.StaticRouteProfile; import com.cloud.network.vpc.Vpc; import com.cloud.network.vpc.VpcGateway; +import com.cloud.network.vpc.VpcGatewayVO; import com.cloud.network.vpc.VpcManager; import com.cloud.network.vpc.VpcVO; import com.cloud.network.vpc.dao.PrivateIpDao; @@ -128,6 +130,8 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian private EntityManager _entityMgr; @Inject protected HypervisorGuruManager _hvGuruMgr; + @Inject + private ConfigurationDao configDao; @Override public boolean configure(final String name, final Map params) throws ConfigurationException { @@ -217,7 +221,7 @@ public boolean removeVpcRouterFromGuestNetwork(final VirtualRouter router, final protected boolean setupVpcGuestNetwork(final Network network, final VirtualRouter router, final boolean add, final NicProfile guestNic) throws ConcurrentOperationException, ResourceUnavailableException { - boolean result = true; + boolean result = true; if (router.getState() == State.Running) { final SetupGuestNetworkCommand setupCmd = _commandSetupHelper.createSetupGuestNetworkCommand((DomainRouterVO) router, add, guestNic); @@ -276,6 +280,19 @@ public boolean finalizeVirtualMachineProfile(final VirtualMachineProfile profile if (defaultDns2 != null) { buf.append(" dns2=").append(defaultDns2); } + + VpcGatewayVO privateGatewayForVpc = _vpcGatewayDao.getPrivateGatewayForVpc(domainRouterVO.getVpcId()); + if (privateGatewayForVpc != null) { + String ip4Address = privateGatewayForVpc.getIp4Address(); + buf.append(" privategateway=").append(ip4Address); + s_logger.debug("Set privategateway field in cmd_line.json to " + ip4Address); + } else { + buf.append(" privategateway=None"); + } + final String vpcWhitelistCidr = configDao.getValue("vpc.usage.whitelist.cidr"); + if (vpcWhitelistCidr != null && vpcWhitelistCidr.length() > 0) { + buf.append(" vpcusagewhitelist=").append(vpcWhitelistCidr); + } } } diff --git a/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java b/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java index 781ee38c8a40..12b9c22451ed 100644 --- a/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java @@ -115,6 +115,7 @@ import com.cloud.network.vpc.dao.VpcOfferingDetailsDao; import com.cloud.network.vpc.dao.VpcOfferingServiceMapDao; import com.cloud.network.vpc.dao.VpcServiceMapDao; +import com.cloud.network.vpn.RemoteAccessVpnService; import com.cloud.network.vpn.Site2SiteVpnManager; import com.cloud.offering.NetworkOffering; import com.cloud.offerings.NetworkOfferingServiceMapVO; @@ -211,6 +212,8 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @Inject Site2SiteVpnManager _s2sVpnMgr; @Inject + RemoteAccessVpnService remoteVpnMgr; + @Inject VlanDao _vlanDao = null; @Inject ResourceLimitService _resourceLimitMgr; @@ -259,6 +262,7 @@ protected void setupSupportedVpcHypervisorsList() { hTypes.add(HypervisorType.LXC); hTypes.add(HypervisorType.Hyperv); hTypes.add(HypervisorType.Ovm3); + hTypes.add(HypervisorType.BareMetal); } @Override @@ -966,10 +970,39 @@ public List getVpcOfferingZones(Long vpcOfferingId) { return vpcOfferingDetailsDao.findZoneIds(vpcOfferingId); } + @Override + @ActionEvent(eventType = EventTypes.EVENT_VPC_SOURCE_NAT_UPDATE, eventDescription = "updating vpc source nat") + public boolean updateVpcSourceNAT(final long id) throws InsufficientCapacityException, ResourceUnavailableException { + CallContext.current().setEventDetails(" ID: " + id); + + // Verify input parameters + final VpcVO vpcToUpdate = _vpcDao.findById(id); + if (vpcToUpdate == null) { + throw new InvalidParameterValueException("Unable to find vpc " + id); + } + + final Account caller = CallContext.current().getCallingAccount(); + final Account owner = _accountMgr.getAccount(vpcToUpdate.accountId); + final User callingUser = _accountMgr.getActiveUser(CallContext.current().getCallingUserId()); + + // Verify that caller can perform actions in behalf of vpc owner + _accountMgr.checkAccess(caller, null, false, owner); + + IpAddress ipAddress = getExistingSourceNatInVpc(owner.getId(), vpcToUpdate.getId()); + if(ipAddress == null) { + throw new InvalidParameterValueException("Can't find source nat ip for vpc " + id); + } + + _ipAddrMgr.disassociatePublicIpAddress(ipAddress.getId(), CallContext.current().getCallingUserId(), owner); + assignSourceNatIpAddressToVpc(owner, vpcToUpdate, ipAddress.getAddress().addr()); + + return restartVpc(vpcToUpdate.getId(), false, false, false, callingUser, false); + } + @Override @ActionEvent(eventType = EventTypes.EVENT_VPC_CREATE, eventDescription = "creating vpc", create = true) public Vpc createVpc(final long zoneId, final long vpcOffId, final long vpcOwnerId, final String vpcName, final String displayText, final String cidr, String networkDomain, - final Boolean displayVpc) throws ResourceAllocationException { + final Boolean displayVpc, final String networkBootIp) throws ResourceAllocationException { final Account caller = CallContext.current().getCallingAccount(); final Account owner = _accountMgr.getAccount(vpcOwnerId); @@ -1023,7 +1056,7 @@ public Vpc createVpc(final long zoneId, final long vpcOffId, final long vpcOwner final boolean useDistributedRouter = vpcOff.isSupportsDistributedRouter(); final VpcVO vpc = new VpcVO(zoneId, vpcName, displayText, owner.getId(), owner.getDomainId(), vpcOffId, cidr, networkDomain, useDistributedRouter, isRegionLevelVpcOff, - vpcOff.isRedundantRouter()); + vpcOff.isRedundantRouter(), networkBootIp); return createVpc(displayVpc, vpc); } @@ -1169,7 +1202,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { @Override @ActionEvent(eventType = EventTypes.EVENT_VPC_UPDATE, eventDescription = "updating vpc") - public Vpc updateVpc(final long vpcId, final String vpcName, final String displayText, final String customId, final Boolean displayVpc) { + public Vpc updateVpc(final long vpcId, final String vpcName, final String displayText, final String customId, final Boolean displayVpc, final String networkBootIp) { CallContext.current().setEventDetails(" Id: " + vpcId); final Account caller = CallContext.current().getCallingAccount(); @@ -1199,6 +1232,10 @@ public Vpc updateVpc(final long vpcId, final String vpcName, final String displa vpc.setDisplay(displayVpc); } + if (networkBootIp != null) { + vpc.setNetworkBootIp(networkBootIp); + } + if (_vpcDao.update(vpcId, vpc)) { s_logger.debug("Updated VPC id=" + vpcId); return _vpcDao.findById(vpcId); @@ -1211,7 +1248,7 @@ public Vpc updateVpc(final long vpcId, final String vpcName, final String displa public Pair, Integer> listVpcs(final Long id, final String vpcName, final String displayText, final List supportedServicesStr, final String cidr, final Long vpcOffId, final String state, final String accountName, Long domainId, final String keyword, final Long startIndex, final Long pageSizeVal, final Long zoneId, Boolean isRecursive, final Boolean listAll, final Boolean restartRequired, final Map tags, final Long projectId, - final Boolean display) { + final Boolean display, final String networkBootIp) { final Account caller = CallContext.current().getCallingAccount(); final List permittedAccounts = new ArrayList(); final Ternary domainIdRecursiveListProject = new Ternary(domainId, isRecursive, @@ -1234,6 +1271,7 @@ public Pair, Integer> listVpcs(final Long id, final String v sb.and("restartRequired", sb.entity().isRestartRequired(), SearchCriteria.Op.EQ); sb.and("cidr", sb.entity().getCidr(), SearchCriteria.Op.EQ); sb.and("display", sb.entity().isDisplay(), SearchCriteria.Op.EQ); + sb.and("networkBootIp", sb.entity().getNetworkBootIp(), SearchCriteria.Op.EQ); if (tags != null && !tags.isEmpty()) { final SearchBuilder tagSearch = _resourceTagDao.createSearchBuilder(); @@ -1304,6 +1342,10 @@ public Pair, Integer> listVpcs(final Long id, final String v sc.addAnd("restartRequired", SearchCriteria.Op.EQ, restartRequired); } + if (networkBootIp != null) { + sc.addAnd("networkBootIp", SearchCriteria.Op.EQ, networkBootIp); + } + final List vpcs = _vpcDao.search(sc, searchFilter); // filter by supported services @@ -1731,13 +1773,14 @@ public boolean restartVpc(final RestartVPCCmd cmd) throws ConcurrentOperationExc final boolean cleanUp = cmd.getCleanup(); final boolean makeRedundant = cmd.getMakeredundant(); final boolean livePatch = cmd.getLivePatch(); + final boolean migrateVpn = cmd.isMigrateVpn(); final User callerUser = _accountMgr.getActiveUser(CallContext.current().getCallingUserId()); - return restartVpc(vpcId, cleanUp, makeRedundant, livePatch, callerUser); + return restartVpc(vpcId, cleanUp, makeRedundant, livePatch, callerUser, migrateVpn); } @Override @ActionEvent(eventType = EventTypes.EVENT_VPC_RESTART, eventDescription = "restarting vpc") - public boolean restartVpc(Long vpcId, boolean cleanUp, boolean makeRedundant, boolean livePatch, User user) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { + public boolean restartVpc(Long vpcId, boolean cleanUp, boolean makeRedundant, boolean livePatch, User user, boolean migrateVpn) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { Vpc vpc = getActiveVpc(vpcId); if (vpc == null) { final InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find Enabled VPC by id specified"); @@ -1777,6 +1820,9 @@ public boolean restartVpc(Long vpcId, boolean cleanUp, boolean makeRedundant, bo restartRequired = true; return false; } + if (migrateVpn) { + remoteVpnMgr.migrateRemoteAccessVpn(vpc.getAccountId(), vpc.getId()); + } return true; } @@ -2758,6 +2804,10 @@ protected List listPublicIpsAssignedToVpc(final long accountId, fin @Override public PublicIp assignSourceNatIpAddressToVpc(final Account owner, final Vpc vpc) throws InsufficientAddressCapacityException, ConcurrentOperationException { + return assignSourceNatIpAddressToVpc(owner, vpc, null); + } + + private PublicIp assignSourceNatIpAddressToVpc(final Account owner, final Vpc vpc, String ignoreIp) throws InsufficientAddressCapacityException, ConcurrentOperationException { final long dcId = vpc.getZoneId(); final IPAddressVO sourceNatIp = getExistingSourceNatInVpc(owner.getId(), vpc.getId()); diff --git a/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java b/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java index 61d247d7b8a5..6c947a4df9b0 100644 --- a/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java @@ -27,16 +27,21 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.acl.SecurityChecker.AccessType; +import org.apache.cloudstack.api.ResourceDetail; import org.apache.cloudstack.api.command.user.vpn.ListRemoteAccessVpnsCmd; import org.apache.cloudstack.api.command.user.vpn.ListVpnUsersCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.pki.PkiDetail; +import org.apache.cloudstack.pki.PkiManager; +import org.apache.cloudstack.resourcedetail.dao.RemoteAccessVpnDetailsDao; import org.apache.commons.collections.CollectionUtils; import org.apache.log4j.Logger; import com.cloud.configuration.Config; +import com.cloud.domain.Domain; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; import com.cloud.event.ActionEvent; @@ -46,6 +51,7 @@ import com.cloud.exception.AccountLimitException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.RemoteAccessVpnException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.Network; import com.cloud.network.Network.Service; @@ -97,6 +103,7 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAccessVpnService, Configurable { private final static Logger s_logger = Logger.getLogger(RemoteAccessVpnManagerImpl.class); + static final ConfigKey RemoteAccessVpnType = new ConfigKey("Network", String.class, RemoteAccessVpnTypeConfigKey, "l2tp", "Type of VPN (ikev2 or l2tp)", false, ConfigKey.Scope.Account); static final ConfigKey RemoteAccessVpnClientIpRange = new ConfigKey("Network", String.class, RemoteAccessVpnClientIpRangeCK, "10.1.2.1-10.1.2.8", "The range of ips to be allocated to remote access vpn clients. The first ip in the range is used by the VPN server", false, ConfigKey.Scope.Account); @@ -107,6 +114,8 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc @Inject RemoteAccessVpnDao _remoteAccessVpnDao; @Inject + RemoteAccessVpnDetailsDao _remoteAccessVpnDetailsDao; + @Inject IPAddressDao _ipAddressDao; @Inject AccountManager _accountMgr; @@ -133,8 +142,12 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc @Inject VpcDao _vpcDao; + @Inject + private PkiManager pkiManager; + int _userLimit; int _pskLength; + SearchBuilder VpnSearch; private List getValidRemoteAccessVpnForAccount(long accountId) { @@ -157,7 +170,7 @@ private List getValidRemoteAccessVpnForAccount(long accountId @Override @DB - public RemoteAccessVpn createRemoteAccessVpn(final long publicIpId, String ipRange, boolean openFirewall, final Boolean forDisplay) throws NetworkRuleConflictException { + public RemoteAccessVpn createRemoteAccessVpn(final long publicIpId, String ipRange, boolean openFirewall, final Boolean forDisplay) throws NetworkRuleConflictException, RemoteAccessVpnException { CallContext ctx = CallContext.current(); final Account caller = ctx.getCallingAccount(); @@ -239,25 +252,85 @@ public RemoteAccessVpn createRemoteAccessVpn(final long publicIpId, String ipRan long startIp = NetUtils.ip2Long(range[0]); final String newIpRange = NetUtils.long2Ip(++startIp) + "-" + range[1]; - final String sharedSecret = PasswordGenerator.generatePresharedKey(_pskLength); + String vpnType = RemoteAccessVpnType.value(); - return Transaction.execute(new TransactionCallbackWithException() { + // use server.secret.pem instead of pre-shared key for VPN IKEv2 + final String sharedSecret = vpnType.equalsIgnoreCase(RemoteAccessVpnService.Type.IKEV2.toString()) ? null : PasswordGenerator.generatePresharedKey(_pskLength); + + RemoteAccessVpn vpn = Transaction.execute(new TransactionCallbackWithException() { @Override public RemoteAccessVpn doInTransaction(TransactionStatus status) throws NetworkRuleConflictException { if (vpcId == null) { _rulesMgr.reservePorts(ipAddr, NetUtils.UDP_PROTO, Purpose.Vpn, openFirewallFinal, caller, NetUtils.VPN_PORT, NetUtils.VPN_L2TP_PORT, NetUtils.VPN_NATT_PORT); } - RemoteAccessVpnVO vpnVO = - new RemoteAccessVpnVO(ipAddr.getAccountId(), ipAddr.getDomainId(), ipAddr.getAssociatedWithNetworkId(), publicIpId, vpcId, range[0], newIpRange, - sharedSecret); + RemoteAccessVpnVO vpnVO = new RemoteAccessVpnVO( + ipAddr.getAccountId(), + ipAddr.getDomainId(), + ipAddr.getAssociatedWithNetworkId(), + publicIpId, + vpcId, + range[0], + newIpRange, + sharedSecret, + vpnType); if (forDisplay != null) { vpnVO.setDisplay(forDisplay); } + return _remoteAccessVpnDao.persist(vpnVO); } }); + + if (vpnType.equalsIgnoreCase(RemoteAccessVpnService.Type.IKEV2.toString())) { + try { + generateIKEv2Certificates((RemoteAccessVpnVO)vpn, ipAddr, ipAddress); + } catch (RemoteAccessVpnException | RuntimeException e) { + // clean up just created vpn + Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { + return _remoteAccessVpnDao.remove(vpn.getId()); + } + }); + + throw e; + } + } + + return vpn; + } + + private void generateIKEv2Certificates(final RemoteAccessVpnVO vpn, final PublicIpAddress ipAddr, final IPAddressVO ipAddress) throws RemoteAccessVpnException { + // issue a signed certificate for the public IP through Vault + final Domain domain = _domainMgr.findDomainByIdOrPath(ipAddr.getDomainId(), null); + final PkiDetail credential = pkiManager.issueCertificate(domain, ipAddress.getAddress()); + + Transaction.execute(new TransactionCallback() { + @Override + public ResourceDetail doInTransaction(TransactionStatus status) throws RuntimeException { + // note that all the vpn details will be encrypted and then stored in database + _remoteAccessVpnDetailsDao.addDetail(vpn.getId(), PkiManager.CREDENTIAL_ISSUING_CA, credential.getIssuingCa(), false); + _remoteAccessVpnDetailsDao.addDetail(vpn.getId(), PkiManager.CREDENTIAL_SERIAL_NUMBER, credential.getSerialNumber(), false); + _remoteAccessVpnDetailsDao.addDetail(vpn.getId(), PkiManager.CREDENTIAL_CERTIFICATE, credential.getCertificate(), false); + _remoteAccessVpnDetailsDao.addDetail(vpn.getId(), PkiManager.CREDENTIAL_PRIVATE_KEY, credential.getPrivateKey(), false); + + // no need to return anything here + return null; + } + }); + + Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { + RemoteAccessVpnVO vpnVO = (RemoteAccessVpnVO)vpn; + + vpnVO.setCaCertificate(credential.getIssuingCa()); + + return _remoteAccessVpnDao.update(vpnVO.getId(), vpnVO); + } + }); } private void validateRemoteAccessVpnConfiguration() throws ConfigurationException { @@ -792,7 +865,7 @@ public String getConfigComponentName() { @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] {RemoteAccessVpnClientIpRange}; + return new ConfigKey[] {RemoteAccessVpnType, RemoteAccessVpnClientIpRange}; } public List getVpnServiceProviders() { @@ -823,4 +896,65 @@ public RemoteAccessVpn updateRemoteAccessVpn(long id, String customId, Boolean f return _remoteAccessVpnDao.findById(id); } + @Override + public boolean migrateRemoteAccessVpn(long accountId, long vpcId) { + // check if remote access VPN is enabled on this VPC + RemoteAccessVpnVO vpnVO = _remoteAccessVpnDao.findByAccountAndVpc(accountId, vpcId); + + if (vpnVO == null) { + s_logger.debug("Remote access VPN is not enabled for VPC " + vpcId + ". Nothing to do."); + return false; + } + + final String currentType = vpnVO.getVpnType(); + final String globalType = RemoteAccessVpnType.value(); + + // check if type of remote access VPN is the same as global setting or not + if (currentType.equalsIgnoreCase(globalType)) { + s_logger.debug("Remote access VPN type of VPC " + vpcId + " is the same global setting. Nothing to do."); + return false; + } + + final String sharedSecret = globalType.equalsIgnoreCase(RemoteAccessVpnService.Type.IKEV2.toString()) ? null : PasswordGenerator.generatePresharedKey(_pskLength); + + // migrate to L2TP + if (globalType.equalsIgnoreCase(RemoteAccessVpnService.Type.L2TP.toString())) { + vpnVO.setVpnType(globalType); + vpnVO.setIpsecPresharedKey(sharedSecret); + vpnVO.setCaCertificate(null); + + _remoteAccessVpnDao.update(vpnVO.getId(), vpnVO); + + // remove details + _remoteAccessVpnDetailsDao.removeDetails(vpnVO.getId()); + + s_logger.debug("Remote access VPN for VPC " + vpcId + " migrated."); + return true; + } + + // Migrate to IKEv2 + else if (globalType.equalsIgnoreCase(RemoteAccessVpnService.Type.IKEV2.toString())) { + try { + final PublicIpAddress ipAddr = _networkMgr.getPublicIpAddress(vpnVO.getServerAddressId()); + final IPAddressVO ipAddress = _ipAddressDao.findById(vpnVO.getServerAddressId()); + + generateIKEv2Certificates(vpnVO, ipAddr, ipAddress); + + vpnVO.setVpnType(globalType); + vpnVO.setIpsecPresharedKey(null); + + _remoteAccessVpnDao.update(vpnVO.getId(), vpnVO); + + s_logger.debug("Remote access VPN for VPC " + vpcId + " migrated."); + return true; + } catch (RemoteAccessVpnException | RuntimeException e) { + s_logger.warn("Remote access VPN migration for VPC " + vpcId + " faild.", e); + return false; + } + } + + s_logger.warn("Unknown Remote access VPN type. Nothing to do"); + return false; + } + } diff --git a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java index b31f7eceadd4..ca9d0f36eee1 100755 --- a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java @@ -36,8 +36,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.exception.StorageConflictException; -import com.cloud.exception.StorageUnavailableException; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.ApiConstants; @@ -125,6 +123,8 @@ import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceInUseException; import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.StorageConflictException; +import com.cloud.exception.StorageUnavailableException; import com.cloud.gpu.GPU; import com.cloud.gpu.HostGpuGroupsVO; import com.cloud.gpu.VGPUTypesVO; @@ -171,6 +171,7 @@ import com.cloud.utils.UriUtils; import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericSearchBuilder; @@ -497,7 +498,6 @@ public List discoverCluster(final AddClusterCmd cmd) throws I final Discoverer discoverer = getMatchingDiscover(hypervisorType); if (discoverer == null) { - throw new InvalidParameterValueException("Could not find corresponding resource manager for " + cmd.getHypervisor()); } @@ -534,6 +534,14 @@ public List discoverCluster(final AddClusterCmd cmd) throws I details.put("ovm3pool", allParams.get("ovm3pool")); details.put("ovm3cluster", allParams.get("ovm3cluster")); } + else if (hypervisorType == HypervisorType.BareMetal) { + if (cmd.getBaremetalMaasHost() != null && cmd.getBaremetalMaasKey() != null && cmd.getBaremetalMaasPool() != null) { + details.put("baremetalType", "MaaS"); + details.put("baremetalMaasHost", cmd.getBaremetalMaasHost()); + details.put("baremetalMaaSKey", DBEncryptionUtil.encrypt(cmd.getBaremetalMaasKey())); + details.put("baremetalMaasPool", cmd.getBaremetalMaasPool()); + } + } details.put("cpuOvercommitRatio", CapacityManager.CpuOverprovisioningFactor.value().toString()); details.put("memoryOvercommitRatio", CapacityManager.MemOverprovisioningFactor.value().toString()); _clusterDetailsDao.persist(cluster.getId(), details); @@ -2205,7 +2213,7 @@ protected HostVO createHostVO(final StartupCommand[] cmds, final ServerResource host.setStorageMacAddress(startup.getStorageMacAddress()); host.setStorageNetmask(startup.getStorageNetmask()); host.setVersion(startup.getVersion()); - host.setName(startup.getName()); + host.setName(StringUtils.isEmpty(startup.getName()) ? resource.getName() : startup.getName()); host.setManagementServerId(_nodeId); host.setStorageUrl(startup.getIqn()); host.setLastPinged(System.currentTimeMillis() >> 10); diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 074fff86be5a..872318a76a06 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -324,6 +324,7 @@ import org.apache.cloudstack.api.command.admin.vpc.ListVPCsCmdByAdmin; import org.apache.cloudstack.api.command.admin.vpc.UpdateVPCCmdByAdmin; import org.apache.cloudstack.api.command.admin.vpc.UpdateVPCOfferingCmd; +import org.apache.cloudstack.api.command.admin.vpc.UpdateVPCSourceNATCmd; import org.apache.cloudstack.api.command.admin.zone.CreateZoneCmd; import org.apache.cloudstack.api.command.admin.zone.DeleteZoneCmd; import org.apache.cloudstack.api.command.admin.zone.ListZonesCmdByAdmin; @@ -3249,6 +3250,7 @@ public List> getCommands() { cmdList.add(DeletePrivateGatewayCmd.class); cmdList.add(DeleteVPCOfferingCmd.class); cmdList.add(UpdateVPCOfferingCmd.class); + cmdList.add(UpdateVPCSourceNATCmd.class); cmdList.add(CreateZoneCmd.class); cmdList.add(DeleteZoneCmd.class); cmdList.add(MarkDefaultZoneForAccountCmd.class); diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index 4a353a1aa1c8..b1576f4c5598 100644 --- a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@ -1599,7 +1599,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { + host.getName() + " . VM: " + vmNetworkStat.getVmName() + " Reported: " + toHumanReadableSize(vmNetworkStat.getBytesSent()) + " Stored: " + toHumanReadableSize(vmNetworkStat_lock.getCurrentBytesSent())); } - vmNetworkStat_lock.setNetBytesSent(vmNetworkStat_lock.getNetBytesSent() + vmNetworkStat_lock.getCurrentBytesSent()); + vmNetworkStat_lock.setNetBytesSent(0); } vmNetworkStat_lock.setCurrentBytesSent(vmNetworkStat.getBytesSent()); @@ -1609,7 +1609,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { + host.getName() + " . VM: " + vmNetworkStat.getVmName() + " Reported: " + toHumanReadableSize(vmNetworkStat.getBytesReceived()) + " Stored: " + toHumanReadableSize(vmNetworkStat_lock.getCurrentBytesReceived())); } - vmNetworkStat_lock.setNetBytesReceived(vmNetworkStat_lock.getNetBytesReceived() + vmNetworkStat_lock.getCurrentBytesReceived()); + vmNetworkStat_lock.setNetBytesReceived(0); } vmNetworkStat_lock.setCurrentBytesReceived(vmNetworkStat.getBytesReceived()); diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 140f62d14b88..3fdb88977055 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -981,11 +981,12 @@ public void removeStoragePoolFromCluster(long hostId, String iScsiName, StorageP cmd.setDetails(details); cmd.setRemoveDatastore(true); + cmd.setPool(storagePool); final Answer answer = _agentMgr.easySend(hostId, cmd); if (answer == null || !answer.getResult()) { - String errMsg = "Error interacting with host (related to DeleteStoragePoolCommand)" + (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : ""); + String errMsg = "Error interacting with host (related to DeleteStoragePoolCommand)" + (answer != null && StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : ""); s_logger.error(errMsg); diff --git a/server/src/main/java/com/cloud/storage/TemplateProfile.java b/server/src/main/java/com/cloud/storage/TemplateProfile.java index b90409480bca..0f4aa0bb9ba6 100644 --- a/server/src/main/java/com/cloud/storage/TemplateProfile.java +++ b/server/src/main/java/com/cloud/storage/TemplateProfile.java @@ -32,6 +32,7 @@ public class TemplateProfile { Boolean passwordEnabled; Boolean sshKeyEnbaled; Boolean requiresHvm; + String bootFilename; String url; Boolean isPublic; Boolean featured; @@ -54,6 +55,7 @@ public class TemplateProfile { Boolean directDownload; Boolean deployAsIs; Long size; + private long zoneId; public TemplateProfile(Long templateId, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHvm, String url, Boolean isPublic, Boolean featured, Boolean isExtractable, ImageFormat format, Long guestOsId, List zoneIdList, HypervisorType hypervisorType, @@ -93,32 +95,31 @@ public TemplateProfile(Long userId, VMTemplateVO template, Long zoneId) { } public TemplateProfile(Long templateId, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHvm, String url, - Boolean isPublic, Boolean featured, Boolean isExtractable, ImageFormat format, Long guestOsId, List zoneId, - + Boolean isPublic, Boolean featured, Boolean isExtractable, ImageFormat format, Long guestOsId, List zoneId, HypervisorType hypervisorType, String accountName, Long domainId, Long accountId, String chksum, Boolean bootable, String templateTag, Map details, Boolean sshKeyEnabled, Long imageStoreId, Boolean isDynamicallyScalable, TemplateType templateType, Boolean directDownload, Boolean deployAsIs) { this(templateId, - userId, - name, - displayText, - bits, - passwordEnabled, - requiresHvm, - url, - isPublic, - featured, - isExtractable, - format, - guestOsId, - zoneId, - hypervisorType, - accountName, - domainId, - accountId, - chksum, - bootable, - details, - sshKeyEnabled); + userId, + name, + displayText, + bits, + passwordEnabled, + requiresHvm, + url, + isPublic, + featured, + isExtractable, + format, + guestOsId, + zoneId, + hypervisorType, + accountName, + domainId, + accountId, + chksum, + bootable, + details, + sshKeyEnabled); this.templateTag = templateTag; this.isDynamicallyScalable = isDynamicallyScalable; this.templateType = templateType; @@ -126,6 +127,17 @@ public TemplateProfile(Long templateId, Long userId, String name, String display this.deployAsIs = deployAsIs; } + public TemplateProfile(Long templateId, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHvm, String url, + Boolean isPublic, Boolean featured, Boolean isExtractable, ImageFormat format, Long guestOsId, List zoneId, + HypervisorType hypervisorType, String accountName, Long domainId, Long accountId, String chksum, Boolean bootable, String templateTag, Map details, + Boolean sshKeyEnabled, Long imageStoreId, Boolean isDynamicallyScalable, TemplateType templateType, Boolean directDownload, Boolean deployAsIs, String bootFilename) { + this(templateId, userId, name, displayText, bits, passwordEnabled, requiresHvm, url, + isPublic, featured, isExtractable, format, guestOsId, zoneId, + hypervisorType, accountName, domainId, accountId, chksum, bootable, templateTag, details, + sshKeyEnabled, imageStoreId, isDynamicallyScalable, templateType, directDownload, deployAsIs); + this.bootFilename = bootFilename; + } + public Long getTemplateId() { return templateId; } @@ -182,6 +194,14 @@ public void setRequiresHVM(Boolean hvm) { this.requiresHvm = hvm; } + public String getBootFilename() { + return bootFilename; + } + + public void setBootFilename(String bootFilename) { + this.bootFilename = bootFilename; + } + public String getUrl() { return url; } @@ -337,4 +357,8 @@ public void setSize(Long size) { public boolean isDeployAsIs() { return this.deployAsIs; } + + public long getZoneId() { + return zoneId; + } } diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 9ce294d2332f..e0e0147f89b1 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -774,6 +774,24 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept maxIops = diskOffering.getMaxIops(); } + //check if iops/gb is defined, if so, use it + if (diskOffering.getMinIopsPerGb() != null) { + minIops = sizeInGB * diskOffering.getMinIopsPerGb(); + } + + if (diskOffering.getMaxIopsPerGb() != null) { + maxIops = sizeInGB * diskOffering.getMaxIopsPerGb(); + } + + //check limits for IOPS and set them if required + if (diskOffering.getHighestMinIops() != null && minIops !=null && minIops > diskOffering.getHighestMinIops()) { + minIops = diskOffering.getHighestMinIops(); + } + + if (diskOffering.getHighestMaxIops() != null && maxIops != null && maxIops > diskOffering.getHighestMaxIops()) { + maxIops = diskOffering.getHighestMaxIops(); + } + if (!validateVolumeSizeInBytes(size)) { throw new InvalidParameterValueException(String.format("Invalid size for custom volume creation: %s, max volume size is: %s GB", NumbersUtil.toReadableSize(size), VolumeOrchestrationService.MaxVolumeSize.value())); } @@ -805,6 +823,9 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept zoneId = snapshotCheck.getDataCenterId(); } + size = snapshotCheck.getSize(); // ; disk offering is used for tags purposes + Long sizeInGB = size/(1024 * 1024 * 1024); + if (diskOffering == null) { // Pure snapshot is being used to create volume. diskOfferingId = snapshotCheck.getDiskOfferingId(); diskOffering = _diskOfferingDao.findById(diskOfferingId); @@ -821,6 +842,23 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept _configMgr.checkDiskOfferingAccess(null, diskOffering, _dcDao.findById(zoneId)); + // IOPS/GB overrides the manually set IOPS + if (diskOffering.getMinIopsPerGb() != null) { + minIops = sizeInGB * diskOffering.getMinIopsPerGb(); + } + + if (diskOffering.getMaxIopsPerGb() != null) { + maxIops = sizeInGB * diskOffering.getMaxIopsPerGb(); + } + + if (diskOffering.getHighestMinIops() != null && minIops != null && minIops > diskOffering.getHighestMinIops()) { + minIops = diskOffering.getHighestMinIops(); + } + + if (diskOffering.getHighestMaxIops() != null && maxIops != null && maxIops > diskOffering.getHighestMaxIops()) { + maxIops = diskOffering.getHighestMaxIops(); + } + // check snapshot permissions _accountMgr.checkAccess(caller, null, true, snapshotCheck); @@ -1013,6 +1051,7 @@ protected VolumeVO createVolumeFromSnapshot(VolumeVO volume, long snapshotId, Lo @ActionEvent(eventType = EventTypes.EVENT_VOLUME_RESIZE, eventDescription = "resizing volume", async = true) public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationException { Long newSize = cmd.getSize(); + Long newSizeInGb = null; Long newMinIops = cmd.getMinIops(); Long newMaxIops = cmd.getMaxIops(); Integer newHypervisorSnapshotReserve = null; @@ -1073,7 +1112,7 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep // no parameter provided; just use the original size of the volume newSize = volume.getSize(); } - + newSizeInGb = newSize >> 30; newMinIops = cmd.getMinIops(); if (newMinIops != null) { @@ -1085,6 +1124,10 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep newMinIops = volume.getMinIops(); } + if (diskOffering.getMinIopsPerGb() != null) { + newMinIops = newSizeInGb * diskOffering.getMinIopsPerGb(); + } + newMaxIops = cmd.getMaxIops(); if (newMaxIops != null) { @@ -1095,6 +1138,9 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep // no parameter provided; just use the original max IOPS of the volume newMaxIops = volume.getMaxIops(); } + if (diskOffering.getMaxIopsPerGb()!=null) { + newMaxIops = newSizeInGb * diskOffering.getMaxIopsPerGb(); + } validateIops(newMinIops, newMaxIops, volume.getPoolType()); } else { @@ -1149,6 +1195,7 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep } checkIfVolumeIsRootAndVmIsRunning(newSize, volume, vmInstanceVO); + if (newDiskOffering.isCustomizedIops() != null && newDiskOffering.isCustomizedIops()) { newMinIops = cmd.getMinIops() != null ? cmd.getMinIops() : volume.getMinIops(); newMaxIops = cmd.getMaxIops() != null ? cmd.getMaxIops() : volume.getMaxIops(); @@ -1159,6 +1206,14 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep newMaxIops = newDiskOffering.getMaxIops(); } + if (newDiskOffering.getMinIopsPerGb() != null) { + newMinIops = newSizeInGb * newDiskOffering.getMinIopsPerGb(); + } + + if (newDiskOffering.getMaxIopsPerGb() != null) { + newMaxIops = newSizeInGb * newDiskOffering.getMaxIopsPerGb(); + } + // if the hypervisor snapshot reserve value is null, it must remain null (currently only KVM uses null and null is all KVM uses for a value here) newHypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve() != null ? newDiskOffering.getHypervisorSnapshotReserve() : null; } @@ -1214,6 +1269,16 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep } } + Long highestMinIops = diskOffering.getHighestMinIops(); + if (newMinIops != null && highestMinIops!= null && newMinIops > highestMinIops) { + newMinIops = highestMinIops; + } + + Long highestMaxIops = diskOffering.getHighestMaxIops(); + if (newMaxIops != null && highestMaxIops!= null && newMaxIops > highestMaxIops) { + newMaxIops = highestMaxIops; + } + // Note: The storage plug-in in question should perform validation on the IOPS to check if a sufficient number of IOPS is available to perform // the requested change @@ -2113,6 +2178,7 @@ private Volume orchestrateAttachVolumeToVM(Long vmId, Long volumeId, Long device //don't create volume on primary storage if its being attached to the vm which Root's volume hasn't been created yet StoragePoolVO destPrimaryStorage = null; + if (existingVolumeOfVm != null && !existingVolumeOfVm.getState().equals(Volume.State.Allocated)) { destPrimaryStorage = _storagePoolDao.findById(existingVolumeOfVm.getPoolId()); if (s_logger.isTraceEnabled() && destPrimaryStorage != null) { @@ -3698,6 +3764,7 @@ private String orchestrateExtractVolume(long volumeId, long zoneId) { volumeStoreRef.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOADED); volumeStoreRef.setDownloadPercent(100); volumeStoreRef.setZoneId(zoneId); + volumeStoreRef.setSize(vol.getSize()); _volumeStoreDao.update(volumeStoreRef.getId(), volumeStoreRef); diff --git a/server/src/main/java/com/cloud/storage/download/DownloadMonitorImpl.java b/server/src/main/java/com/cloud/storage/download/DownloadMonitorImpl.java index 1954cdea6879..4c8915e5c968 100644 --- a/server/src/main/java/com/cloud/storage/download/DownloadMonitorImpl.java +++ b/server/src/main/java/com/cloud/storage/download/DownloadMonitorImpl.java @@ -25,6 +25,7 @@ import javax.inject.Inject; +import org.apache.cloudstack.engine.subsystem.api.storage.StorageCacheManager; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -73,6 +74,8 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor private ConfigurationDao _configDao; @Inject private EndPointSelector _epSelector; + @Inject + private StorageCacheManager cacheManager; private String _copyAuthPasswd; private String _proxy = null; @@ -218,6 +221,10 @@ public void downloadVolumeToStorage(DataObject volume, AsyncCompletionCallback zones = profile.getZoneIdList(); //zones is null when this template is to be registered to all zones - if (zones == null){ + if (zones == null) { createTemplateWithinZone(null, profile, template); - } - else { + } else { for (Long zId : zones) { createTemplateWithinZone(zId, profile, template); } diff --git a/server/src/main/java/com/cloud/template/TemplateAdapter.java b/server/src/main/java/com/cloud/template/TemplateAdapter.java index 86dd0d3cad5d..83ab9b042254 100644 --- a/server/src/main/java/com/cloud/template/TemplateAdapter.java +++ b/server/src/main/java/com/cloud/template/TemplateAdapter.java @@ -26,6 +26,7 @@ import org.apache.cloudstack.api.command.user.template.ExtractTemplateCmd; import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd; import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; +import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand; import com.cloud.exception.ResourceAllocationException; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -34,7 +35,6 @@ import com.cloud.storage.VMTemplateVO; import com.cloud.user.Account; import com.cloud.utils.component.Adapter; -import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand; public interface TemplateAdapter extends Adapter { public static class TemplateAdapterType { @@ -80,4 +80,10 @@ TemplateProfile prepare(boolean isIso, long userId, String name, String displayT Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneId, HypervisorType hypervisorType, String chksum, Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshKeyEnabled, String imageStoreUuid, Boolean isDynamicallyScalable, TemplateType templateType, boolean directDownload, boolean deployAsIs) throws ResourceAllocationException; + TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, Integer bits, + Boolean passwordEnabled, Boolean requiresHVM, String url, Boolean isPublic, Boolean featured, + Boolean isExtractable, String format, Long guestOSId, List zoneId, HypervisorType hypervisorType, + String chksum, Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshKeyEnabled, + String imageStoreUuid, Boolean isDynamicallyScalable, TemplateType templateType, boolean directDownload, + boolean deployAsIs, String bootFilename) throws ResourceAllocationException; } diff --git a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java index a9cd9478c58b..3b9829f4f159 100644 --- a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java +++ b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java @@ -268,6 +268,20 @@ public TemplateProfile prepare(boolean isIso, long userId, String name, String d } + public TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, + Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneId, HypervisorType hypervisorType, String chksum, + Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshKeyEnabled, String imageStoreUuid, Boolean isDynamicallyScalable, + TemplateType templateType, boolean directDownload, boolean deployAsIs, String bootFilename) throws ResourceAllocationException { + TemplateProfile templateProfile = prepare(isIso, userId, name, displayText, bits, passwordEnabled, requiresHVM, url, + isPublic, featured, isExtractable, format, guestOSId, zoneId, hypervisorType, chksum, + bootable, templateTag, templateOwner, details, sshKeyEnabled, imageStoreUuid, isDynamicallyScalable, + templateType, directDownload, deployAsIs); + + templateProfile.setBootFilename(bootFilename); + return templateProfile; + } + + @Override public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocationException { //check if the caller can operate with the template owner @@ -305,8 +319,8 @@ public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocatio } return prepare(false, CallContext.current().getCallingUserId(), cmd.getTemplateName(), cmd.getDisplayText(), cmd.getBits(), cmd.isPasswordEnabled(), cmd.getRequiresHvm(), cmd.getUrl(), cmd.isPublic(), cmd.isFeatured(), cmd.isExtractable(), cmd.getFormat(), cmd.getOsTypeId(), zoneId, hypervisorType, cmd.getChecksum(), true, - cmd.getTemplateTag(), owner, details, cmd.isSshKeyEnabled(), null, cmd.isDynamicallyScalable(), isRouting ? TemplateType.ROUTING : TemplateType.USER, - cmd.isDirectDownload(), cmd.isDeployAsIs()); + cmd.getTemplateTag(), owner, cmd.getDetails(), cmd.isSshKeyEnabled(), null, cmd.isDynamicallyScalable(), isRouting ? TemplateType.ROUTING : TemplateType.USER, + cmd.isDirectDownload(), cmd.isDeployAsIs(), cmd.getBootFilename()); } @@ -398,13 +412,17 @@ protected VMTemplateVO persistTemplate(TemplateProfile profile, VirtualMachineTe new VMTemplateVO(profile.getTemplateId(), profile.getName(), profile.getFormat(), profile.isPublic(), profile.isFeatured(), profile.isExtractable(), profile.getTemplateType(), profile.getUrl(), profile.isRequiresHVM(), profile.getBits(), profile.getAccountId(), profile.getCheckSum(), profile.getDisplayText(), profile.isPasswordEnabled(), profile.getGuestOsId(), profile.isBootable(), profile.getHypervisorType(), - profile.getTemplateTag(), profile.getDetails(), profile.isSshKeyEnabled(), profile.IsDynamicallyScalable(), profile.isDirectDownload(), profile.isDeployAsIs()); + profile.getTemplateTag(), profile.getDetails(), profile.isSshKeyEnabled(), profile.IsDynamicallyScalable(), profile.isDirectDownload(), profile.isDeployAsIs(), profile.getBootFilename()); template.setState(initialState); if (profile.isDirectDownload()) { template.setSize(profile.getSize()); } + if(profile.getFormat() == ImageFormat.PXEBOOT) { + template.setSize(0L); + } + if (zoneIdList == null) { List dcs = _dcDao.listAll(); diff --git a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java index 2f1e1a552d44..f91e550d7cac 100755 --- a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java @@ -1922,6 +1922,13 @@ public VMTemplateVO createPrivateTemplateRecord(CreateTemplateCmd cmd, Account t } } } + + Map guestDetails = _guestOSDao.loadDetails(guestOS); + + if (guestDetails != null) { + details.putAll(guestDetails); + } + if (cmd.getDetails() != null) { details.remove(VmDetailConstants.ENCRYPTED_PASSWORD); // new password will be generated during vm deployment from password enabled template details.putAll(cmd.getDetails()); @@ -2054,7 +2061,9 @@ private VMTemplateVO updateTemplateOrIso(BaseUpdateTemplateOrIsoCmd cmd) { Boolean requiresHvm = cmd.getRequiresHvm(); Integer sortKey = cmd.getSortKey(); Map details = cmd.getDetails(); + String bootFilename = cmd.getBootFilename(); Account account = CallContext.current().getCallingAccount(); + boolean cleanupDetails = cmd.isCleanupDetails(); // verify that template exists @@ -2112,6 +2121,7 @@ private VMTemplateVO updateTemplateOrIso(BaseUpdateTemplateOrIsoCmd cmd) { isDynamicallyScalable == null && isRoutingTemplate == null && templateType == null && + bootFilename == null && (! cleanupDetails && details == null) //update details in every case except this one ); if (!updateNeeded) { @@ -2199,6 +2209,9 @@ private VMTemplateVO updateTemplateOrIso(BaseUpdateTemplateOrIsoCmd cmd) { } validateDetails(template, details); + if (bootFilename != null) { + template.setBootFilename(bootFilename); + } if (cleanupDetails) { template.setDetails(null); diff --git a/server/src/main/java/com/cloud/test/DatabaseConfig.java b/server/src/main/java/com/cloud/test/DatabaseConfig.java index 5f475b673cea..a2ac9b320cd6 100644 --- a/server/src/main/java/com/cloud/test/DatabaseConfig.java +++ b/server/src/main/java/com/cloud/test/DatabaseConfig.java @@ -1022,6 +1022,18 @@ protected void saveDiskOffering() { Long iopsWriteRate = Long.parseLong(_currentObjectParams.get("iopsWriteRate")); if (iopsWriteRate != null && (iopsWriteRate > 0)) diskOffering.setIopsWriteRate(iopsWriteRate); + Long minIopsPerGb = Long.parseLong(_currentObjectParams.get("minIopsPerGb")); + if (minIopsPerGb > 0) + diskOffering.setMinIopsPerGb(minIopsPerGb); + Long maxIopsPerGb = Long.parseLong(_currentObjectParams.get("maxIopsPerGb")); + if (maxIopsPerGb > 0) + diskOffering.setMaxIopsPerGb(maxIopsPerGb); + Long highestMinIops = Long.parseLong(_currentObjectParams.get("highestMinIops")); + if (highestMinIops > 0) + diskOffering.setHighestMinIops(highestMinIops); + Long highestMaxIops = Long.parseLong(_currentObjectParams.get("highestMaxIops")); + if (highestMaxIops > 0) + diskOffering.setHighestMaxIops(highestMaxIops); DiskOfferingDaoImpl offering = ComponentContext.inject(DiskOfferingDaoImpl.class); try { diff --git a/server/src/main/java/com/cloud/usage/UsageServiceImpl.java b/server/src/main/java/com/cloud/usage/UsageServiceImpl.java index e9264554ec50..394d0280a514 100644 --- a/server/src/main/java/com/cloud/usage/UsageServiceImpl.java +++ b/server/src/main/java/com/cloud/usage/UsageServiceImpl.java @@ -145,13 +145,15 @@ public boolean generateUsageRecords(GenerateUsageRecordsCmd cmd) { if (immediateJob == null) { UsageJobVO job = _usageJobDao.getLastJob(); - String host = null; - int pid = 0; - if (job != null) { + if (job == null) { + String host = null; + int pid = 0; + host = job.getHost(); pid = ((job.getPid() == null) ? 0 : job.getPid().intValue()); + + _usageJobDao.createNewJob(host, pid, UsageJobVO.JOB_TYPE_SINGLE); } - _usageJobDao.createNewJob(host, pid, UsageJobVO.JOB_TYPE_SINGLE); } } finally { txn.close(); @@ -218,7 +220,7 @@ public Pair, Integer> getUsageRecords(ListUsageRecordsCmd ", using pageSize: " + cmd.getPageSizeVal() + " and startIndex: " + cmd.getStartIndex()); } - Filter usageFilter = new Filter(UsageVO.class, "id", true, cmd.getStartIndex(), cmd.getPageSizeVal()); + Filter usageFilter = new Filter(UsageVO.class, "startDate", true, cmd.getStartIndex(), cmd.getPageSizeVal()); SearchCriteria sc = _usageDao.createSearchCriteria(); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 11741a4e4940..c661a5c230f5 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -3986,7 +3986,10 @@ private UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOffe } // Check templates permissions - _accountMgr.checkAccess(owner, AccessType.UseEntry, false, template); + Account templateOwner = _accountMgr.getAccount(template.getAccountId()); + if(caller.getAccountId() != templateOwner.getAccountId()){ + _accountMgr.checkAccess(owner, AccessType.UseEntry, false, template); + } // check if the user data is correct userData = validateUserData(userData, httpmethod); @@ -4684,7 +4687,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmNetworkStat.getVmName() + " Reported: " + toHumanReadableSize(vmNetworkStat.getBytesSent()) + " Stored: " + toHumanReadableSize(vmNetworkStat_lock.getCurrentBytesSent())); } - vmNetworkStat_lock.setNetBytesSent(vmNetworkStat_lock.getNetBytesSent() + vmNetworkStat_lock.getCurrentBytesSent()); + vmNetworkStat_lock.setNetBytesSent(0); } vmNetworkStat_lock.setCurrentBytesSent(vmNetworkStat.getBytesSent()); @@ -4694,7 +4697,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmNetworkStat.getVmName() + " Reported: " + toHumanReadableSize(vmNetworkStat.getBytesReceived()) + " Stored: " + toHumanReadableSize(vmNetworkStat_lock.getCurrentBytesReceived())); } - vmNetworkStat_lock.setNetBytesReceived(vmNetworkStat_lock.getNetBytesReceived() + vmNetworkStat_lock.getCurrentBytesReceived()); + vmNetworkStat_lock.setNetBytesReceived(0); } vmNetworkStat_lock.setCurrentBytesReceived(vmNetworkStat.getBytesReceived()); diff --git a/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataTO.java b/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataTO.java index 115ee718fbed..fa0e74876685 100644 --- a/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataTO.java +++ b/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataTO.java @@ -57,4 +57,9 @@ public String getPath() { public long getId() { return id; } + + @Override + public String getName() { + return null; + } } diff --git a/server/src/main/java/org/apache/cloudstack/pki/PkiConfig.java b/server/src/main/java/org/apache/cloudstack/pki/PkiConfig.java new file mode 100644 index 000000000000..01a686596e76 --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/pki/PkiConfig.java @@ -0,0 +1,123 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.pki; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.apache.cloudstack.framework.config.ConfigKey; + +/** + * @author Khosrow Moossavi + * @since 4.10.0.228-cca + */ +public enum PkiConfig { + CertificateBrand("Network", String.class, "pki.engine.certificate.brand", "CloudStack", "Brand name to be used in Certificate's common name"), + CertificateCommonName("Network", String.class, "pki.engine.certificate.common.name", "__BRAND__ VPN __DOMAIN__ CA", + "Certificate's common name template (brand will be filled from 'pki.engine.certificate.brand', domain will be provided on the fly"), + VaultEnabled("Network", Boolean.class, "pki.engine.vault.enabled", "false", "Enable Vault as the backend PKI engine"), + VaultUrl("Network", String.class, "pki.engine.vault.url", "", "Full URL of Vault endpoint (e.g. http://127.0.0.1:8200)"), + VaultCertUrl("Network", String.class, "pki.engine.vault.url.cert", "/v1//ca", "Full URL value for the Issuing Certificate"), + VaultCrlUrl("Network", String.class, "pki.engine.vault.url.crl", "/v1//crl", "Full URL for the CRL Distribution Point"), + VaultVerifySsl("Network", Boolean.class, "pki.engine.vault.verify.ssl", "true", "Verify SSL of Vault endpoint"), + VaultToken("Network", String.class, "pki.engine.vault.token", "", "Token to access Vault"), + VaultAppRoleId("Network", String.class, "pki.engine.vault.token.role.id", "", "App Role id to be used to fetch token to access Vault"), + VaultAppSecretId("Network", String.class, "pki.engine.vault.token.secret.id", "", "Secret id to be used to fetch token to access Vault"), + VaultPkiTtl("Network", String.class, "pki.engine.vault.ttl", "87600h", "Vault PKI TTL (e.g. 87600h)"), + VaultCATtl("Network", String.class, "pki.engine.vault.cca.ttl", "87600h", "Vault PKI root CA TTL (e.g. 87600h)"), + VaultRoleName("Network", String.class, "pki.engine.vault.role.name", "cloudstack-vpn", "Vault PKI role name"), + VaultRoleTtl("Network", String.class, "pki.engine.vault.role.ttl", "43800h", "Vault PKI role TTL (e.g. 43800h)"), + VaultMounthPath("Network", String.class, "pki.engine.vault.mount.path", "pki/cloudstack", "Vault PKI mount point prefix (must not end with trailing slash)"); + + private final String _category; + private final Class _type; + private final String _name; + private final String _defaultValue; + private final String _description; + private final boolean _dynamic; + private final ConfigKey.Scope _scope; + + private static final List PkiEngineConfigKeys = new ArrayList(); + + static { + Arrays.stream(PkiConfig.values()).forEach(c -> PkiEngineConfigKeys.add(c.key())); + } + + private PkiConfig(String category, Class type, String name, String defaultValue, String description) { + _category = category; + _type = type; + _name = name; + _defaultValue = defaultValue; + _description = description; + _dynamic = false; + _scope = ConfigKey.Scope.Global; + } + + public String getCategory() { + return _category; + } + + public Class getType() { + return _type; + } + + public String getName() { + return _name; + } + + public String getDefaultValue() { + return _defaultValue; + } + + public String getDescription() { + return _description; + } + + public boolean isDynamic() { + return _dynamic; + } + + public ConfigKey.Scope getScope() { + return _scope; + } + + public String key() { + return _name; + } + + public static boolean doesKeyExist(String key) { + return PkiEngineConfigKeys.contains(key); + } + + public static ConfigKey[] asConfigKeys() { + return Arrays.stream(PkiConfig.values()) + .map(config -> asConfigKey(config)) + .toArray(ConfigKey[]::new); + } + + public static ConfigKey asConfigKey(PkiConfig config) { + return new ConfigKey<>( + config.getCategory(), + config.getType(), + config.getName(), + config.getDefaultValue(), + config.getDescription(), + config.isDynamic(), + config.getScope()); + } +} diff --git a/server/src/main/java/org/apache/cloudstack/pki/PkiEngine.java b/server/src/main/java/org/apache/cloudstack/pki/PkiEngine.java new file mode 100644 index 000000000000..0be2ca9932d5 --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/pki/PkiEngine.java @@ -0,0 +1,52 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.pki; + +import com.cloud.domain.Domain; +import com.cloud.utils.net.Ip; + +/** + * @author Khosrow Moossavi + * @since 4.10.0.228-cca + */ +public interface PkiEngine { + /** + * Issue a Certificate for specific IP and specific Domain act as the CA. This will have two + * known implementation, {@link PkiEngineDefault} and {@link PkiEngineVault}. Vault implementation + * will delegate everything CA-related to Vault to process it, while Default will assume the + * CA-related actions will be done within the scope of the same application. + * + * @param domain object to extract name and id to be used to issuing CA + * @param publicIp to be included in the certificate + * + * @return details about the just signed PKI, including issuing CA, certificate, private key and serial number + * + * @throws Exception + */ + PkiDetail issueCertificate(Domain domain, Ip publicIp) throws Exception; + + /** + * Get a Certificate for specific Domain act as the CA + * + * @param domain object to extract its id to be find the issuing CA + * + * @return details about signed PKI, including issuing CA, certificate and serial number + * + * @throws Exception + */ + PkiDetail getCertificate(Domain domain) throws Exception; +} diff --git a/server/src/main/java/org/apache/cloudstack/pki/PkiEngineDefault.java b/server/src/main/java/org/apache/cloudstack/pki/PkiEngineDefault.java new file mode 100644 index 000000000000..98b2c46cc546 --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/pki/PkiEngineDefault.java @@ -0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.pki; + +import java.util.Map; + +import com.cloud.domain.Domain; +import com.cloud.utils.net.Ip; + +/** + * @author Khosrow Moossavi + * @since 4.10.0.228-cca + */ +public class PkiEngineDefault implements PkiEngine { + public PkiEngineDefault(Map configs) { + } + + /* (non-Javadoc) + * @see org.apache.cloudstack.pki.PkiEngine#issueCertificate(com.cloud.domain.Domain, com.cloud.utils.net.Ip) + */ + @Override + public PkiDetail issueCertificate(Domain domain, Ip publicIp) throws Exception { + throw new UnsupportedOperationException("Cannot issue certificate with Default implementation, use Vault instead."); + } + + /* (non-Javadoc) + * @see org.apache.cloudstack.pki.PkiEngine#getCertificate(com.cloud.domain.Domain) + */ + @Override + public PkiDetail getCertificate(Domain domain) { + throw new UnsupportedOperationException("Cannot get certificate with Default implementation, use Vault instead."); + } +} diff --git a/server/src/main/java/org/apache/cloudstack/pki/PkiEngineVault.java b/server/src/main/java/org/apache/cloudstack/pki/PkiEngineVault.java new file mode 100644 index 000000000000..f923442a450c --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/pki/PkiEngineVault.java @@ -0,0 +1,365 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.pki; + +import java.util.Arrays; +import java.util.Map; + +import org.apache.commons.lang.BooleanUtils; +import org.apache.commons.lang3.StringUtils; +import org.springframework.util.Assert; + +import com.bettercloud.vault.SslConfig; +import com.bettercloud.vault.Vault; +import com.bettercloud.vault.VaultConfig; +import com.bettercloud.vault.VaultException; +import com.bettercloud.vault.api.Logical; +import com.bettercloud.vault.api.pki.Credential; +import com.bettercloud.vault.api.pki.Pki; +import com.bettercloud.vault.api.pki.RoleOptions; +import com.bettercloud.vault.response.AuthResponse; +import com.bettercloud.vault.response.LogicalResponse; +import com.bettercloud.vault.response.PkiResponse; +import com.cloud.domain.Domain; +import com.cloud.utils.net.Ip; +import com.google.common.collect.ImmutableMap; + +/** + * @author Khosrow Moossavi + * @since 4.10.0.228-cca + */ +public class PkiEngineVault implements PkiEngine { + public static final int RETRY_COUNT = 2; + public static final int RETRY_INTERVAL_MILISECONDS = 2000; + public static final int OPEN_CONNECTION_TIMEOUT_SECONDS = 5; + public static final int READ_CONNECTION_TIMEOUT_SECONDS = 5; + + private static final String DEFAULT_FAULT_CERT_URL = "/v1//ca"; + private static final String DEFAULT_FAULT_CRL_URL = "/v1//crl"; + + private final String _vaultUrl; + private final boolean _vaultVerifySsl; + private final String _vaultCertUrl; + private final String _vaultCrlUrl; + private final String _vaultToken; + private final String _vaultTokenRoleId; + private final String _vaultTokenSecretId; + private final String _vaultRoleName; + private final String _vaultMountPath; + + private final String _certificateCommonName; + private final String _vaultPkiTtl; + private final String _vaultCATtl; + private final String _vaultRoleTtl; + + public PkiEngineVault(Map configs) { + _vaultUrl = configs.get(PkiConfig.VaultUrl.key()); + Assert.isTrue(StringUtils.isNotEmpty(_vaultUrl), "PKI Engine: URL of Vault endpoint is missing"); + + _vaultVerifySsl = BooleanUtils.toBoolean(configs.get(PkiConfig.VaultVerifySsl.key())); + + _vaultCertUrl = configs.get(PkiConfig.VaultCertUrl.key()); + _vaultCrlUrl = configs.get(PkiConfig.VaultCrlUrl.key()); + + _vaultToken = configs.get(PkiConfig.VaultToken.key()); + + // if Token provided ignore RoleId and SecretId + if (StringUtils.isNotEmpty(_vaultToken)) { + _vaultTokenRoleId = null; + _vaultTokenSecretId = null; + } else { + _vaultTokenRoleId = configs.get(PkiConfig.VaultAppRoleId.key()); + _vaultTokenSecretId = configs.get(PkiConfig.VaultAppSecretId.key()); + + if (StringUtils.isEmpty(_vaultTokenRoleId) && StringUtils.isEmpty(_vaultTokenSecretId)) { + throw new IllegalArgumentException("PKI Engine: Vault Token access and RoleId and SecretId are missing"); + } + } + + _vaultRoleName = configs.get(PkiConfig.VaultRoleName.key()); + Assert.isTrue(StringUtils.isNotEmpty(_vaultRoleName), "PKI Engine: Vault PKI role name is missing"); + + String mountPath = configs.get(PkiConfig.VaultMounthPath.key()); + + Assert.isTrue(StringUtils.isNotEmpty(mountPath), "PKI Engine: Vault PKI mount path is missing"); + Assert.isTrue(!StringUtils.endsWith(mountPath, "/"), "PKI Engine: Vault PKI mount path must not end with trailing slash, current value: " + mountPath); + + _vaultMountPath = mountPath + "/%s"; + + String certificateBrand = configs.get(PkiConfig.CertificateBrand.key()); + _certificateCommonName = configs.get(PkiConfig.CertificateCommonName.key()).replaceAll("__BRAND__", certificateBrand); + + _vaultPkiTtl = configs.get(PkiConfig.VaultPkiTtl.key()); + Assert.isTrue(StringUtils.isNotEmpty(_vaultPkiTtl), "PKI Engine: Vault PKI TTL is missing"); + + _vaultCATtl = configs.get(PkiConfig.VaultCATtl.key()); + Assert.isTrue(StringUtils.isNotEmpty(_vaultCATtl), "PKI Engine: Vault PKI root CA TTL is missing"); + + _vaultRoleTtl = configs.get(PkiConfig.VaultRoleTtl.key()); + Assert.isTrue(StringUtils.isNotEmpty(_vaultRoleTtl), "PKI Engine: Vault PKI role TTL is missing"); + } + + /* (non-Javadoc) + * @see org.apache.cloudstack.pki.PkiEngine#issueCertificate(com.cloud.domain.Domain, com.cloud.utils.net.Ip) + */ + @Override + public PkiDetail issueCertificate(Domain domain, Ip publicIp) throws VaultException { + Assert.notNull(domain, "PKI Engine: Cannot issue Certificate because domain is null"); + + Vault vault = new VaultBuilder().build(); + + createRoleIfMissing(vault, domain); + + final String path = String.format(_vaultMountPath, domain.getUuid()); + Pki pki = vault.pki(path); + + PkiResponse response = pki.issue(_vaultRoleName, publicIp.addr(), null, Arrays.asList(publicIp.addr()), null, null); + Credential credential = response.getCredential(); + + if (response.getRestResponse().getStatus() == 404) { + throw new VaultException("Cannot find Vault PKI backend path for domain " + domain.getUuid()); + } + + return new PkiDetail() + .certificate(credential.getCertificate()) + .issuingCa(credential.getIssuingCa()) + .privateKey(credential.getPrivateKey()) + .privateKeyType(credential.getPrivateKeyType()) + .serialNumber(credential.getSerialNumber()); + } + + /* (non-Javadoc) + * @see org.apache.cloudstack.pki.PkiEngine#getCertificate(com.cloud.domain.Domain) + */ + @Override + public PkiDetail getCertificate(Domain domain) throws VaultException { + Assert.notNull(domain, "PKI Engine: Cannot get Certificate because domain is null"); + + Vault vault = new VaultBuilder().build(); + Logical logical = vault.logical(); + + final String path = String.format(_vaultMountPath, domain.getUuid()); + final String apiEndoint = new StringBuilder() + .append(path) + .append("/cert/ca") + .toString(); + + LogicalResponse response = logical.read(apiEndoint); + Map data = response.getData(); + + Assert.hasLength(data.get("certificate"), "PKI Engine: Cannot get Certificate, Vault response is empty"); + + return new PkiDetail().issuingCa(data.get("certificate")); + } + + /** + * Create Vault PKI role if it's missing or return the existing one + * + * @param vault object + * @param domain object + * + * @return newly created or existing Vault PKI role + * + * @throws VaultException + */ + private RoleOptions createRoleIfMissing(Vault vault, Domain domain) throws VaultException { + final String path = String.format(_vaultMountPath, domain.getUuid()); + Pki pki = vault.pki(path); + PkiResponse response = pki.getRole(_vaultRoleName); + RoleOptions role = response.getRoleOptions(); + + // role does exist + if (response.getRestResponse().getStatus() == 200) { + return role; + } + + createMountPointIfMissing(vault, domain); + createRootCertIfMissing(vault, domain); + createConfigUrlIfMissing(vault, domain); + + // create new role + RoleOptions options = new RoleOptions() + .allowAnyName(true) + .ttl(_vaultRoleTtl); + + return pki.createOrUpdateRole(_vaultRoleName, options).getRoleOptions(); + } + + /** + * Create Vault PKI engine mount point if it's missing + * + * @param vault object + * @param domain object + * + * @throws VaultException + */ + private void createMountPointIfMissing(Vault vault, Domain domain) throws VaultException { + final String sysMountBase = "sys/mounts"; + final String path = String.format(_vaultMountPath, domain.getUuid()); + final String apiEndpoint = new StringBuilder() + .append(sysMountBase) + .append("/") + .append(path) + .toString(); + + try { + vault.logical().read(sysMountBase + "/tune"); + return; + } catch (VaultException e) { + // mount point not found, continue to create it + } + + // create mount point + Map createPayload = ImmutableMap.of("type", "pki"); + vault.logical().write(apiEndpoint, createPayload); + + // tune mount point + Map tunePayload = ImmutableMap.of( + "default_lease_ttl", _vaultPkiTtl, + "max_lease_ttl", _vaultPkiTtl, + "description", domain.getName()); + vault.logical().write(apiEndpoint + "/tune", tunePayload); + } + + /** + * Create Vault root Certificate CA if it's missing + * + * @param vault object + * @param domain object + * + * @throws VaultException + */ + private void createRootCertIfMissing(Vault vault, Domain domain) throws VaultException { + String path = String.format(_vaultMountPath, domain.getUuid()); + final String apiEndpoint = new StringBuilder() + .append(path) + .append("/root/generate/internal") + .toString(); + + final String commonName = _certificateCommonName.replaceAll("__DOMAIN__", domain.getName()); + Map payload = ImmutableMap.of("common_name", commonName, "ttl", _vaultCATtl); + + vault.logical().write(apiEndpoint, payload); + } + + /** + * create Vault PKI CRL config URLs if they are missing + * + * @param vault object + * @param domain object + * + * @throws VaultException + */ + private void createConfigUrlIfMissing(Vault vault, Domain domain) throws VaultException { + final String path = String.format(_vaultMountPath, domain.getUuid()); + final String apiEndpoint = new StringBuilder() + .append(path) + .append("/config/urls") + .toString(); + + try { + vault.logical().read(apiEndpoint); + return; + } catch (VaultException e) { + // config urls for this pki endpoint don't exist, continue to create them + } + + String caUrl; + + if (StringUtils.isEmpty(_vaultCertUrl)) { + caUrl = ""; + } else if (_vaultCertUrl.equals(DEFAULT_FAULT_CERT_URL)) { + caUrl = new StringBuilder() + .append(_vaultUrl) + .append("/v1/") + .append(path) + .append("/ca") + .toString(); + } else { + caUrl = new StringBuilder() + .append(_vaultCertUrl) + .append(StringUtils.endsWith(_vaultCertUrl, "/") ? "" : "/") + .append(domain.getUuid()) + .toString(); + } + + String crlUrl; + + if (StringUtils.isEmpty(_vaultCrlUrl)) { + crlUrl = ""; + } else if (_vaultCrlUrl.equals(DEFAULT_FAULT_CRL_URL)) { + crlUrl = new StringBuilder() + .append(_vaultUrl) + .append("/v1/") + .append(path) + .append("/crl") + .toString(); + } else { + crlUrl = new StringBuilder() + .append(_vaultCrlUrl) + .append(StringUtils.endsWith(_vaultCrlUrl, "/") ? "" : "/") + .append(domain.getUuid()) + .toString(); + } + + // create CRL config urls + Map createPayload = ImmutableMap.of("issuing_certificates", caUrl, "crl_distribution_points", crlUrl); + vault.logical().write(apiEndpoint, createPayload); + } + + /** + * Vault object builder + */ + private class VaultBuilder { + private VaultBuilder() { + } + + /** + * Build Vault object based on provided information and scenarios + * + * 1) Vault Token is provided: create VaultConfig and Vault object right away + * 2) Vault Token is not provided: fetching Vault Token based on provided RoleId and SecretId + * + * @return Vault object containing client token (provided or fetched) + * + * @throws VaultException + */ + public Vault build() throws VaultException { + final VaultConfig config = new VaultConfig() + .address(_vaultUrl) + .token(_vaultToken) + .sslConfig(new SslConfig().verify(_vaultVerifySsl)) + .openTimeout(OPEN_CONNECTION_TIMEOUT_SECONDS) + .readTimeout(READ_CONNECTION_TIMEOUT_SECONDS) + .build(); + + // Vault Token is provided, Vault object can be initialized right away + if (StringUtils.isNotEmpty(_vaultToken)) { + return new Vault(config).withRetries(RETRY_COUNT, RETRY_INTERVAL_MILISECONDS); + } + + // Vault Token is not provided, but AppRole information is. + // We're going to fetch client token through REST API call. + AuthResponse response = new Vault(config).auth().loginByAppRole(_vaultTokenRoleId, _vaultTokenSecretId); + + // putting back client token on VaultConfig for further use + config.token(response.getAuthClientToken()); + + return new Vault(config).withRetries(RETRY_COUNT, RETRY_INTERVAL_MILISECONDS); + } + } +} diff --git a/server/src/main/java/org/apache/cloudstack/pki/PkiManagerImpl.java b/server/src/main/java/org/apache/cloudstack/pki/PkiManagerImpl.java new file mode 100644 index 000000000000..bad29a4c0c63 --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/pki/PkiManagerImpl.java @@ -0,0 +1,103 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.pki; + +import java.util.Map; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.commons.lang.BooleanUtils; + +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; + +import com.cloud.domain.Domain; +import com.cloud.exception.RemoteAccessVpnException; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.net.Ip; + +/** + * PKI Manager base class. This will work as a factory to construct Vault or Default + * implementation and pass through the API call to corresponding implementation. + * + * @author Khosrow Moossavi + * @since 4.10.0.228-cca + */ +public class PkiManagerImpl extends ManagerBase implements PkiManager, Configurable { + @Inject + private ConfigurationDao configDao; + + private PkiEngine pkiEngine; + + /* (non-Javadoc) + * @see com.cloud.utils.component.ComponentLifecycleBase#configure(java.lang.String, java.util.Map) + */ + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + Map configs = configDao.getConfiguration(params); + + if (BooleanUtils.toBoolean(configs.get(PkiConfig.VaultEnabled.key()))) { + pkiEngine = new PkiEngineVault(configs); + } else { + pkiEngine = new PkiEngineDefault(configs); + } + + return true; + } + + /* (non-Javadoc) + * @see org.apache.cloudstack.framework.config.Configurable#getConfigComponentName() + */ + @Override + public String getConfigComponentName() { + return PkiManager.class.getSimpleName(); + } + + /* (non-Javadoc) + * @see org.apache.cloudstack.framework.config.Configurable#getConfigKeys() + */ + @Override + public ConfigKey[] getConfigKeys() { + return PkiConfig.asConfigKeys(); + } + + /* (non-Javadoc) + * @see org.apache.cloudstack.pki.PkiManager#issueCertificate(com.cloud.domain.Domain, com.cloud.utils.net.Ip) + */ + @Override + public PkiDetail issueCertificate(Domain domain, Ip publicIp) throws RemoteAccessVpnException { + try { + return pkiEngine.issueCertificate(domain, publicIp); + } catch (Exception e) { + throw new RemoteAccessVpnException(e.getMessage()); + } + } + + /* (non-Javadoc) + * @see org.apache.cloudstack.pki.PkiManager#getCertificate(com.cloud.domain.Domain) + */ + @Override + public PkiDetail getCertificate(Domain domain) throws RemoteAccessVpnException { + try { + return pkiEngine.getCertificate(domain); + } catch (Exception e) { + throw new RemoteAccessVpnException(e.getMessage()); + } + } +} diff --git a/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml b/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml index 9c0e9a125f93..1caa90cfd9eb 100644 --- a/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml +++ b/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml @@ -179,6 +179,8 @@ + + diff --git a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java index d0943ad5281e..a17352d7d6fe 100644 --- a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java +++ b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java @@ -25,6 +25,7 @@ import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.nullable; +import static org.mockito.AdditionalAnswers.returnsFirstArg; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; @@ -105,7 +106,9 @@ import com.cloud.network.dao.PhysicalNetworkVO; import com.cloud.projects.ProjectManager; import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.Storage; import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.user.AccountManager; @@ -114,6 +117,7 @@ import com.cloud.user.User; import com.cloud.user.UserVO; import com.cloud.user.dao.AccountDao; +import com.cloud.user.dao.UserDao; import com.cloud.utils.db.Filter; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.TransactionLegacy; @@ -184,11 +188,15 @@ public class ConfigurationManagerTest { @Mock ConfigurationDao _configDao; @Mock + DiskOfferingDao diskOfferingDao; + @Mock DiskOfferingVO diskOfferingVOMock; @Mock DataCenterGuestIpv6PrefixDao dataCenterGuestIpv6PrefixDao; @Mock Ipv6GuestPrefixSubnetNetworkMapDao ipv6GuestPrefixSubnetNetworkMapDao; + @Mock + UserDao _userDao; VlanVO vlan = new VlanVO(Vlan.VlanType.VirtualNetwork, "vlantag", "vlangateway", "vlannetmask", 1L, "iprange", 1L, 1L, null, null, null); @@ -909,6 +917,356 @@ public void hasSameSubnetTest() { Assert.assertTrue(result); } + @Test + public void testCreateDiskOfferingNoIopsFixedSize(){ + + configurationMgr._accountDao = _accountDao; + configurationMgr._userDao = _userDao; + configurationMgr._diskOfferingDao = diskOfferingDao; + + UserVO userVO = Mockito.mock(UserVO.class); + AccountVO accountVO = Mockito.mock(AccountVO.class); + when(accountVO.getType()).thenReturn(Account.Type.ADMIN); + + when(configurationMgr._userDao.findById(anyLong())).thenReturn(userVO); + when(configurationMgr._accountDao.findById(anyLong())).thenReturn(accountVO); + when(configurationMgr._diskOfferingDao.persist(any(DiskOfferingVO.class))).then(returnsFirstArg()); + + DiskOfferingVO diskOfferingVO = configurationMgr.createDiskOffering(1L, null, null,"test-vol", "test-description", Storage.ProvisioningType.THIN.toString(), + 10L, null, false, false, false, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, false, null, null, null, null); + + Assert.assertEquals((10L * 1024 * 1024 * 1024), diskOfferingVO.getDiskSize()); + Assert.assertNull(diskOfferingVO.getMinIops()); + Assert.assertNull(diskOfferingVO.getMaxIops()); + Assert.assertFalse(diskOfferingVO.isCustomized()); + Assert.assertNull(diskOfferingVO.isCustomizedIops()); + Assert.assertNull(diskOfferingVO.getHighestMinIops()); + Assert.assertNull(diskOfferingVO.getHighestMaxIops()); + Assert.assertNull(diskOfferingVO.getMinIopsPerGb()); + Assert.assertNull(diskOfferingVO.getMaxIopsPerGb()); + } + + @Test + public void testCreateDiskOfferingFixedIopsFixedSizeNoHighest(){ + configurationMgr._accountDao = _accountDao; + configurationMgr._userDao = _userDao; + configurationMgr._diskOfferingDao = diskOfferingDao; + + UserVO userVO = Mockito.mock(UserVO.class); + AccountVO accountVO = Mockito.mock(AccountVO.class); + when(accountVO.getType()).thenReturn(Account.Type.ADMIN); + + when(configurationMgr._userDao.findById(anyLong())).thenReturn(userVO); + when(configurationMgr._accountDao.findById(anyLong())).thenReturn(accountVO); + when(configurationMgr._diskOfferingDao.persist(any(DiskOfferingVO.class))).then(returnsFirstArg()); + + DiskOfferingVO diskOfferingVO = configurationMgr.createDiskOffering(1L, null, null, "test-vol", "test-description", Storage.ProvisioningType.THIN.toString(), + 10L, null, false, false, false, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, false, null, null, null, null); + + Assert.assertEquals((10L * 1024 * 1024 * 1024), diskOfferingVO.getDiskSize()); + Assert.assertNull(diskOfferingVO.getMinIops()); + Assert.assertNull(diskOfferingVO.getMaxIops()); + Assert.assertFalse(diskOfferingVO.isCustomized()); + Assert.assertNull(diskOfferingVO.isCustomizedIops()); + Assert.assertNull(diskOfferingVO.getHighestMinIops()); + Assert.assertNull(diskOfferingVO.getHighestMaxIops()); + Assert.assertNull(diskOfferingVO.getMinIopsPerGb()); + Assert.assertNull(diskOfferingVO.getMaxIopsPerGb()); + } + + @Test + public void testCreateDiskOfferingNoIopsFixedSizeWithHighest(){ + configurationMgr._accountDao = _accountDao; + configurationMgr._userDao = _userDao; + configurationMgr._diskOfferingDao = diskOfferingDao; + boolean seenException = false; + + UserVO userVO = Mockito.mock(UserVO.class); + AccountVO accountVO = Mockito.mock(AccountVO.class); + when(accountVO.getType()).thenReturn(Account.Type.ADMIN); + + when(configurationMgr._userDao.findById(anyLong())).thenReturn(userVO); + when(configurationMgr._accountDao.findById(anyLong())).thenReturn(accountVO); + when(configurationMgr._diskOfferingDao.persist(any(DiskOfferingVO.class))).then(returnsFirstArg()); + + try { + DiskOfferingVO diskOfferingVO = configurationMgr.createDiskOffering(1L, null, null, "test-vol", "test-description", Storage.ProvisioningType.THIN.toString(), + 10L, null, false, false, false, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, false, null, null, 1000L, 5000L); + } catch (InvalidParameterValueException e) { + Assert.assertTrue(e.toString().contains("highestminops specified but none of customizediops or miniopspergb specified")); + seenException = true; + } + + Assert.assertTrue("InvalidParameterValueException expected but got no exception", seenException); + } + + @Test + public void testCreateDiskOfferingFixedIopsFixedSizeWithHighest(){ + configurationMgr._accountDao = _accountDao; + configurationMgr._userDao = _userDao; + configurationMgr._diskOfferingDao = diskOfferingDao; + boolean seenException = false; + + UserVO userVO = Mockito.mock(UserVO.class); + AccountVO accountVO = Mockito.mock(AccountVO.class); + when(accountVO.getType()).thenReturn(Account.Type.ADMIN); + + when(configurationMgr._userDao.findById(anyLong())).thenReturn(userVO); + when(configurationMgr._accountDao.findById(anyLong())).thenReturn(accountVO); + when(configurationMgr._diskOfferingDao.persist(any(DiskOfferingVO.class))).then(returnsFirstArg()); + + try { + DiskOfferingVO diskOfferingVO = configurationMgr.createDiskOffering(1L, null, null, "test-vol", "test-description", Storage.ProvisioningType.THIN.toString(), + 10L, null, false, false, false, null, 1000L, 5000L, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, false, null, null, 1000L, 5000L); + } catch (InvalidParameterValueException e) { + Assert.assertTrue(e.toString().contains("highestminops specified but none of customizediops or miniopspergb specified")); + seenException = true; + } + + Assert.assertTrue("InvalidParameterValueException expected but got no exception", seenException); + + } + + @Test + public void testCreateDiskOfferingCustomIopsFixedSizeNoHighest(){ + configurationMgr._accountDao = _accountDao; + configurationMgr._userDao = _userDao; + configurationMgr._diskOfferingDao = diskOfferingDao; + + UserVO userVO = Mockito.mock(UserVO.class); + AccountVO accountVO = Mockito.mock(AccountVO.class); + when(accountVO.getType()).thenReturn(Account.Type.ADMIN); + + when(configurationMgr._userDao.findById(anyLong())).thenReturn(userVO); + when(configurationMgr._accountDao.findById(anyLong())).thenReturn(accountVO); + when(configurationMgr._diskOfferingDao.persist(any(DiskOfferingVO.class))).then(returnsFirstArg()); + + DiskOfferingVO diskOfferingVO = configurationMgr.createDiskOffering(1L, null, null, "test-vol", "test-description", Storage.ProvisioningType.THIN.toString(), + 10L, null, false, false, false, true, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, false, null, null, null, null); + + Assert.assertEquals((10L * 1024 * 1024 * 1024), diskOfferingVO.getDiskSize()); + Assert.assertNull(diskOfferingVO.getMinIops()); + Assert.assertNull(diskOfferingVO.getMaxIops()); + Assert.assertFalse(diskOfferingVO.isCustomized()); + Assert.assertTrue(diskOfferingVO.isCustomizedIops()); + Assert.assertNull(diskOfferingVO.getHighestMinIops()); + Assert.assertNull(diskOfferingVO.getHighestMaxIops()); + Assert.assertNull(diskOfferingVO.getMinIopsPerGb()); + Assert.assertNull(diskOfferingVO.getMaxIopsPerGb()); + + } + + @Test + public void testCreateDiskOfferingCustomIopsFixedSizeWithHighest(){ + configurationMgr._accountDao = _accountDao; + configurationMgr._userDao = _userDao; + configurationMgr._diskOfferingDao = diskOfferingDao; + DiskOfferingVO diskOfferingVO = null; + UserVO userVO = Mockito.mock(UserVO.class); + AccountVO accountVO = Mockito.mock(AccountVO.class); + when(accountVO.getType()).thenReturn(Account.Type.ADMIN); + + when(configurationMgr._userDao.findById(anyLong())).thenReturn(userVO); + when(configurationMgr._accountDao.findById(anyLong())).thenReturn(accountVO); + when(configurationMgr._diskOfferingDao.persist(any(DiskOfferingVO.class))).then(returnsFirstArg()); + + Long testHighestMinIops = 1000L; + Long testHighestMaxIops = 5000L; + boolean seenException = false; + + diskOfferingVO = configurationMgr.createDiskOffering(1L, null, null, "test-vol", "test-description", Storage.ProvisioningType.THIN.toString(), + 10L, null, false, false, false, true, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, false, null, null, testHighestMinIops, testHighestMaxIops); + + Assert.assertEquals((10L * 1024 * 1024 * 1024), diskOfferingVO.getDiskSize()); + Assert.assertNull(diskOfferingVO.getMinIops()); + Assert.assertNull(diskOfferingVO.getMaxIops()); + Assert.assertFalse(diskOfferingVO.isCustomized()); + Assert.assertTrue(diskOfferingVO.isCustomizedIops()); + Assert.assertEquals(testHighestMinIops, diskOfferingVO.getHighestMinIops()); + Assert.assertEquals(testHighestMaxIops, diskOfferingVO.getHighestMaxIops()); + Assert.assertNull(diskOfferingVO.getMinIopsPerGb()); + Assert.assertNull(diskOfferingVO.getMaxIopsPerGb()); + + // highestminiops specified but no highestmaxiops + try { + diskOfferingVO = configurationMgr.createDiskOffering(1L, null, null, "test-vol", "test-description", Storage.ProvisioningType.THIN.toString(), + 10L, null, false, false, false, true, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, false, null, null, testHighestMinIops, null); + }catch (InvalidParameterValueException e) { + Assert.assertTrue("Incorrect exception raised", e.toString().contains("Both highestminiops and highestmaxiops should be specified")); + seenException = true; + } + Assert.assertTrue("Expected to raise an exception, but no exception was raised", seenException); + + // highestmaxiops specified but no highestminiops + seenException = false; + try { + diskOfferingVO = configurationMgr.createDiskOffering(1L, null, null, "test-vol", "test-description", Storage.ProvisioningType.THIN.toString(), + 10L, null, false, false, false, true, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, false, null, null, null, testHighestMaxIops); + }catch (InvalidParameterValueException e) { + Assert.assertTrue("Incorrect exception raised", e.toString().contains("Both highestminiops and highestmaxiops should be specified")); + seenException = true; + } + Assert.assertTrue("Expected to raise an exception, but no exception was raised", seenException); + + // highest min > highest max + testHighestMinIops = 5000L; + testHighestMaxIops = 1000L; + seenException = false; + try { + diskOfferingVO = configurationMgr.createDiskOffering(1L, null, null, "test-vol", "test-description", Storage.ProvisioningType.THIN.toString(), + 10L, null, false, false, false, true, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, false, null, null, testHighestMinIops, testHighestMaxIops); + }catch (InvalidParameterValueException e) { + Assert.assertTrue("Incorrect exception raised", e.toString().contains("highestminiops must be less than highestmaxiops")); + seenException = true; + } + Assert.assertTrue("Expected to raise an exception, but no exception was raised", seenException); + + //non positive value for highestMinIops + testHighestMinIops = -1L; + testHighestMaxIops = 1000L; + seenException = false; + try { + diskOfferingVO = configurationMgr.createDiskOffering(1L, null, null, "test-vol", "test-description", Storage.ProvisioningType.THIN.toString(), + 10L, null, false, false, false, true, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, false, null, null, testHighestMinIops, testHighestMaxIops); + }catch (InvalidParameterValueException e) { + Assert.assertTrue("Incorrect exception raised", e.toString().contains("highestminiops/highestmaxiops value must be greater than 0")); + seenException = true; + } + Assert.assertTrue("Expected to raise an exception, but no exception was raised", seenException); + } + + @Test + public void testCreateDiskOfferingCustomIopsFixedSizeWithIopsGb(){ + configurationMgr._accountDao = _accountDao; + configurationMgr._userDao = _userDao; + configurationMgr._diskOfferingDao = diskOfferingDao; + DiskOfferingVO diskOfferingVO = null; + UserVO userVO = Mockito.mock(UserVO.class); + AccountVO accountVO = Mockito.mock(AccountVO.class); + when(accountVO.getType()).thenReturn(Account.Type.ADMIN); + + when(configurationMgr._userDao.findById(anyLong())).thenReturn(userVO); + when(configurationMgr._accountDao.findById(anyLong())).thenReturn(accountVO); + when(configurationMgr._diskOfferingDao.persist(any(DiskOfferingVO.class))).then(returnsFirstArg()); + + Long testMinIopsPerGb = 10L; + Long testMaxIopsPerGb = 50L; + boolean seenException = false; + + try { + diskOfferingVO = configurationMgr.createDiskOffering(1L, null, null, "test-vol", "test-description", Storage.ProvisioningType.THIN.toString(), + 10L, null, false, false, false, true, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, false, testMinIopsPerGb, testMaxIopsPerGb, null, null); + } catch (InvalidParameterValueException e) { + Assert.assertTrue("Incorrect exception raised:" + e.toString(), e.toString().contains("Cannot set Min/Max IOPS/GB for a fixed size disk offering")); + seenException = true; + } + Assert.assertTrue("Expected to raise an exception, but no exception was raised", seenException); + } + + @Test + public void testCreateDiskOfferingFixedIopsOrCustomIopsWithIopsGb(){ + configurationMgr._accountDao = _accountDao; + configurationMgr._userDao = _userDao; + configurationMgr._diskOfferingDao = diskOfferingDao; + DiskOfferingVO diskOfferingVO = null; + UserVO userVO = Mockito.mock(UserVO.class); + AccountVO accountVO = Mockito.mock(AccountVO.class); + when(accountVO.getType()).thenReturn(Account.Type.ADMIN); + + when(configurationMgr._userDao.findById(anyLong())).thenReturn(userVO); + when(configurationMgr._accountDao.findById(anyLong())).thenReturn(accountVO); + when(configurationMgr._diskOfferingDao.persist(any(DiskOfferingVO.class))).then(returnsFirstArg()); + + Long testMinIopsPerGb = 10L; + Long testMaxIopsPerGb = 50L; + boolean seenException = false; + + //fixed iops, custom size + try { + diskOfferingVO = configurationMgr.createDiskOffering(1L, null, null, "test-vol", "test-description", Storage.ProvisioningType.THIN.toString(), + null, null, true, false, false, null, 1000L, 5000L, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, false, testMinIopsPerGb, testMaxIopsPerGb, null, null); + } catch (InvalidParameterValueException e) { + Assert.assertTrue("Incorrect exception raised:" + e.toString(), e.toString().contains("Cannot set Min/Max IOPS/GB with either custom IOPS or fixed IOPS")); + seenException = true; + } + Assert.assertTrue("Expected to raise an exception, but no exception was raised", seenException); + + + //custom iops, custom size + seenException = false; + try { + diskOfferingVO = configurationMgr.createDiskOffering(1L, null, null, "test-vol", "test-description", Storage.ProvisioningType.THIN.toString(), + 10L, null, true, false, false, true, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, false, testMinIopsPerGb, testMaxIopsPerGb, null, null); + } catch (InvalidParameterValueException e) { + Assert.assertTrue("Incorrect exception raised:" + e.toString(), e.toString().contains("Cannot set Min/Max IOPS/GB with either custom IOPS or fixed IOPS")); + seenException = true; + } + Assert.assertTrue("Expected to raise an exception, but no exception was raised", seenException); + } + + @Test + public void testCreateDiskOfferingCustomSizeWithIopsGb(){ + configurationMgr._accountDao = _accountDao; + configurationMgr._userDao = _userDao; + configurationMgr._diskOfferingDao = diskOfferingDao; + DiskOfferingVO diskOfferingVO = null; + UserVO userVO = Mockito.mock(UserVO.class); + AccountVO accountVO = Mockito.mock(AccountVO.class); + when(accountVO.getType()).thenReturn(Account.Type.ADMIN); + + when(configurationMgr._userDao.findById(anyLong())).thenReturn(userVO); + when(configurationMgr._accountDao.findById(anyLong())).thenReturn(accountVO); + when(configurationMgr._diskOfferingDao.persist(any(DiskOfferingVO.class))).then(returnsFirstArg()); + + Long testMinIopsPerGb = 10L; + Long testMaxIopsPerGb = 50L; + boolean seenException = false; + + diskOfferingVO = configurationMgr.createDiskOffering(1L, null, null, "test-vol", "test-description", Storage.ProvisioningType.THIN.toString(), + null, null, true, false, false, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, false, testMinIopsPerGb, testMaxIopsPerGb, null, null); + + Assert.assertNull(diskOfferingVO.getMinIops()); + Assert.assertNull(diskOfferingVO.getMaxIops()); + Assert.assertTrue(diskOfferingVO.isCustomized()); + Assert.assertNull(diskOfferingVO.isCustomizedIops()); + Assert.assertEquals(testMinIopsPerGb, diskOfferingVO.getMinIopsPerGb()); + Assert.assertEquals(testMaxIopsPerGb, diskOfferingVO.getMaxIopsPerGb()); + Assert.assertNull(diskOfferingVO.getHighestMinIops()); + Assert.assertNull(diskOfferingVO.getHighestMaxIops()); + } + + @Test + public void testCreateDiskOfferingCustomSizeWithIopsGbWithHighest(){ + configurationMgr._accountDao = _accountDao; + configurationMgr._userDao = _userDao; + configurationMgr._diskOfferingDao = diskOfferingDao; + DiskOfferingVO diskOfferingVO = null; + UserVO userVO = Mockito.mock(UserVO.class); + AccountVO accountVO = Mockito.mock(AccountVO.class); + when(accountVO.getType()).thenReturn(Account.Type.ADMIN); + + when(configurationMgr._userDao.findById(anyLong())).thenReturn(userVO); + when(configurationMgr._accountDao.findById(anyLong())).thenReturn(accountVO); + when(configurationMgr._diskOfferingDao.persist(any(DiskOfferingVO.class))).then(returnsFirstArg()); + + Long testMinIopsPerGb = 10L; + Long testMaxIopsPerGb = 50L; + Long testHighestMinIops = 500L; + Long testHighestMaxIops = 1000L; + + diskOfferingVO = configurationMgr.createDiskOffering(1L, null, null, "test-vol", "test-description", Storage.ProvisioningType.THIN.toString(), + null, null, true, false, false, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, false, testMinIopsPerGb, testMaxIopsPerGb, testHighestMinIops, testHighestMaxIops); + + Assert.assertNull(diskOfferingVO.getMinIops()); + Assert.assertNull(diskOfferingVO.getMaxIops()); + Assert.assertTrue(diskOfferingVO.isCustomized()); + Assert.assertNull(diskOfferingVO.isCustomizedIops()); + Assert.assertEquals(testMinIopsPerGb, diskOfferingVO.getMinIopsPerGb()); + Assert.assertEquals(testMaxIopsPerGb, diskOfferingVO.getMaxIopsPerGb()); + Assert.assertEquals(testHighestMinIops, diskOfferingVO.getHighestMinIops()); + Assert.assertEquals(testHighestMaxIops, diskOfferingVO.getHighestMaxIops()); + } + @Test(expected = CloudRuntimeException.class) public void testGetVlanNumberFromUriInvalidParameter() { configurationMgr.getVlanNumberFromUri("vlan"); diff --git a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java index 5b9875bc61e0..761ffa0b70e6 100644 --- a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java +++ b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java @@ -35,11 +35,6 @@ import java.util.UUID; import java.util.concurrent.ExecutionException; -import com.cloud.api.query.dao.ServiceOfferingJoinDao; -import com.cloud.api.query.vo.ServiceOfferingJoinVO; -import com.cloud.service.ServiceOfferingVO; -import com.cloud.service.dao.ServiceOfferingDao; -import com.cloud.storage.dao.VMTemplateDao; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; @@ -75,6 +70,9 @@ import org.mockito.runners.MockitoJUnitRunner; import org.springframework.test.util.ReflectionTestUtils; +import com.cloud.api.query.dao.ServiceOfferingJoinDao; +import com.cloud.api.query.vo.ServiceOfferingJoinVO; +import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.Resource; import com.cloud.configuration.Resource.ResourceType; import com.cloud.dc.DataCenterVO; @@ -86,8 +84,13 @@ import com.cloud.org.Grouping; import com.cloud.serializer.GsonHelper; import com.cloud.server.TaggedResourceService; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.Volume.Type; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.StoragePoolTagsDao; +import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.user.Account; @@ -162,6 +165,10 @@ public class VolumeApiServiceImplTest { private ServiceOfferingJoinDao serviceOfferingJoinDao; @Mock private ServiceOfferingDao serviceOfferingDao; + @Mock + private DiskOfferingDao _diskOfferingDao; + @Mock + private ConfigurationManager _configMgr; private DetachVolumeCmd detachCmd = new DetachVolumeCmd(); private Class _detachCmdClass = detachCmd.getClass(); @@ -171,6 +178,9 @@ public class VolumeApiServiceImplTest { private long storagePoolMockId = 1; @Mock private DiskOfferingVO newDiskOfferingMock; + @Mock + private SnapshotDao _snapshotDao; + @Mock private VolumeVO volumeVoMock; @@ -529,6 +539,247 @@ public void testResourceLimitCheckForUploadedVolume() throws NoSuchFieldExceptio } } +// @Test +// public void testResizeVolumeFromFixedSizeToCustomSize() throws NoSuchFieldException, IllegalAccessException { +// ResizeVolumeCmd resizeVolumeCmd = Mockito.mock(ResizeVolumeCmd.class); +// when(resizeVolumeCmd.getNewDiskOfferingId()).thenReturn(2L); +// when(resizeVolumeCmd.getEntityId()).thenReturn(1L); +// when(resizeVolumeCmd.getSize()).thenReturn(10L); +// +// VolumeVO volumeVO = new VolumeVO(Volume.Type.DATADISK, "test-vol", 1, 1, 1, 1, Storage.ProvisioningType.THIN, +// 10L, null, null, null); +// Field IdField = VolumeVO.class.getDeclaredField("id"); +// IdField.setAccessible(true); +// IdField.set(volumeVO, 1L); +// +// ReflectionTestUtils.setField(volumeApiServiceImpl, "_maxVolumeSizeInGb", 2 * 1024); +// +// DiskOfferingVO diskOfferingVO = new DiskOfferingVO("fixed-size","fixed-size", Storage.ProvisioningType.THIN, +// 10L, "", false, false, null, null, null); +// DiskOfferingVO newDiskOfferingVO = new DiskOfferingVO("custom-size", "custom-size", Storage.ProvisioningType.THIN, +// 0L, "", true, false, null, null, null); +// +// VolumeVO newVolume = null; +// +// when(volumeDaoMock.findById(1L)).thenReturn(volumeVO); +// when(volumeDaoMock.getHypervisorType(1L)).thenReturn(HypervisorType.XenServer); +// when(volumeDaoMock.update(anyLong(), any(VolumeVO.class))).thenReturn(true); +// +// when(_vmSnapshotDao.findByVm(anyLong())).thenReturn(new ArrayList()); +// +// when(_diskOfferingDao.findById(1L)).thenReturn(diskOfferingVO); +// when(_diskOfferingDao.findById(2L)).thenReturn(newDiskOfferingVO); +// doNothing().when(_configMgr).checkDiskOfferingAccess(any(Account.class), any(DiskOffering.class), any(DataCenter.class)); +// +// try { +// newVolume = volumeApiServiceImpl.resizeVolume(resizeVolumeCmd); +// Assert.assertEquals(Long.valueOf(2L), newVolume.getDiskOfferingId()); +// } catch (ResourceAllocationException e) { +// Assert.fail(e.getMessage()); +// } +// } + +// @Test +// public void testResizeVolumeFromCustomSizeFixedIopsToFixedSize() throws NoSuchFieldException, IllegalAccessException { +// ResizeVolumeCmd resizeVolumeCmd = Mockito.mock(ResizeVolumeCmd.class); +// when(resizeVolumeCmd.getNewDiskOfferingId()).thenReturn(2L); +// when(resizeVolumeCmd.getEntityId()).thenReturn(1L); +// when(resizeVolumeCmd.getSize()).thenReturn(10L); +// +// VolumeVO volumeVO = new VolumeVO(Volume.Type.DATADISK, "test-vol", 1, 1, 1, 1, Storage.ProvisioningType.THIN, +// 10L, null, null, null); +// Field IdField = VolumeVO.class.getDeclaredField("id"); +// IdField.setAccessible(true); +// IdField.set(volumeVO, 1L); +// +// ReflectionTestUtils.setField(volumeApiServiceImpl, "_maxVolumeSizeInGb", 2 * 1024); +// +// DiskOfferingVO diskOfferingVO = new DiskOfferingVO("custom-size-fixed-iops","custom-size-fixed-iops", +// Storage.ProvisioningType.THIN, 0L, "", true, false, 100L, 200L, null); +// DiskOfferingVO newDiskOfferingVO = new DiskOfferingVO("fixed-size", "fixed-size", Storage.ProvisioningType.THIN, +// 0L, "", true, false, null, null, null); +// +// VolumeVO newVolume = null; +// +// when(volumeDaoMock.findById(1L)).thenReturn(volumeVO); +// when(volumeDaoMock.getHypervisorType(1L)).thenReturn(HypervisorType.XenServer); +// when(volumeDaoMock.update(anyLong(), any(VolumeVO.class))).thenReturn(true); +// +// when(_vmSnapshotDao.findByVm(anyLong())).thenReturn(new ArrayList()); +// +// when(_diskOfferingDao.findById(1L)).thenReturn(diskOfferingVO); +// when(_diskOfferingDao.findById(2L)).thenReturn(newDiskOfferingVO); +// doNothing().when(_configMgr).checkDiskOfferingAccess(any(Account.class), any(DiskOffering.class), any(DataCenter.class)); +// +// try { +// newVolume = volumeApiServiceImpl.resizeVolume(resizeVolumeCmd); +// Assert.assertEquals(Long.valueOf(2L), newVolume.getDiskOfferingId()); +// } catch (ResourceAllocationException e) { +// Assert.fail(e.getMessage()); +// } +// } +// +// @Test +// public void testResizeVolumeFromFixedSizeFixedIopsToCustomSizeIopsPerGb() throws NoSuchFieldException, IllegalAccessException { +// Long newSize = 20L * 1024 * 1024 * 1024; +// Long newSizeGb = 20L; +// Long minIopsPerGb = 10L; +// Long maxIopsPerGb = 20L; +// Long newMinIops = newSizeGb * minIopsPerGb; +// Long newMaxIops = newSizeGb * maxIopsPerGb; +// +// ResizeVolumeCmd resizeVolumeCmd = Mockito.mock(ResizeVolumeCmd.class); +// when(resizeVolumeCmd.getNewDiskOfferingId()).thenReturn(2L); +// when(resizeVolumeCmd.getEntityId()).thenReturn(1L); +// when(resizeVolumeCmd.getSize()).thenReturn(newSizeGb); +// +// VolumeVO volumeVO = new VolumeVO(Volume.Type.DATADISK, "test-vol", 1, 1, 1, 1, Storage.ProvisioningType.THIN, +// 10L, 100L, 200L, null); +// Field IdField = VolumeVO.class.getDeclaredField("id"); +// IdField.setAccessible(true); +// IdField.set(volumeVO, 1L); +// +// ReflectionTestUtils.setField(volumeApiServiceImpl, "_maxVolumeSizeInGb", 2 * 1024); +// +// DiskOfferingVO diskOfferingVO = new DiskOfferingVO("fixed-size-fixed-iops","fixed-size-fixed-iops", +// Storage.ProvisioningType.THIN, 10L, "", false, false, 100L, 200L, null); +// DiskOfferingVO newDiskOfferingVO = new DiskOfferingVO("custom-size-iopspergb", "custom-size-iopspergb", Storage.ProvisioningType.THIN, +// 0L, "", true, false, null, null, null); +// newDiskOfferingVO.setMinIopsPerGb(10L); +// newDiskOfferingVO.setMaxIopsPerGb(20L); +// +// VolumeVO newVolume; +// +// when(volumeDaoMock.findById(1L)).thenReturn(volumeVO); +// when(volumeDaoMock.getHypervisorType(1L)).thenReturn(HypervisorType.XenServer); +// when(volumeDaoMock.update(anyLong(), any(VolumeVO.class))).thenReturn(true); +// +// when(_vmSnapshotDao.findByVm(anyLong())).thenReturn(new ArrayList()); +// +// when(_diskOfferingDao.findById(1L)).thenReturn(diskOfferingVO); +// when(_diskOfferingDao.findById(2L)).thenReturn(newDiskOfferingVO); +// doNothing().when(_configMgr).checkDiskOfferingAccess(any(Account.class), any(DiskOffering.class), any(DataCenter.class)); +// +// try { +// newVolume = volumeApiServiceImpl.resizeVolume(resizeVolumeCmd); +// Assert.assertEquals(newMinIops, newVolume.getMinIops()); +// Assert.assertEquals(newMaxIops, newVolume.getMaxIops()); +// Assert.assertEquals(newSize, newVolume.getSize()); +// Assert.assertEquals(Volume.State.Allocated, newVolume.getState()); +// } catch (ResourceAllocationException e) { +// Assert.fail(e.getMessage()); +// } +// +// } + +// @Test +// public void testResizeVolumeFromCustomSizeIopsPerGbToFixedSizeFixedIops() throws NoSuchFieldException, IllegalAccessException { +// Long newSize = 20L * 1024 * 1024 * 1024; +// Long newSizeGb = 20L; +// Long newMinIops = 500L; +// Long newMaxIops = 600L; +// +// ResizeVolumeCmd resizeVolumeCmd = Mockito.mock(ResizeVolumeCmd.class); +// when(resizeVolumeCmd.getNewDiskOfferingId()).thenReturn(2L); +// when(resizeVolumeCmd.getEntityId()).thenReturn(1L); +// +// VolumeVO volumeVO = new VolumeVO(Volume.Type.DATADISK, "test-vol", 1, 1, 1, 1, Storage.ProvisioningType.THIN, +// newSize, 100L, 200L, null); +// Field IdField = VolumeVO.class.getDeclaredField("id"); +// IdField.setAccessible(true); +// IdField.set(volumeVO, 1L); +// +// ReflectionTestUtils.setField(volumeApiServiceImpl, "_maxVolumeSizeInGb", 2 * 1024); +// +// DiskOfferingVO diskOfferingVO = new DiskOfferingVO(1L, "custom-size-iopspergb", "custom-size-iopspergb", Storage.ProvisioningType.THIN, +// 0L, "", true, false, null, null, null); +// diskOfferingVO.setMinIopsPerGb(10L); +// diskOfferingVO.setMaxIopsPerGb(20L); +// +// DiskOfferingVO newDiskOfferingVO = new DiskOfferingVO(1L,"fixed-size-fixed-iops","fixed-size-fixed-iops", +// Storage.ProvisioningType.THIN, newSize, "", false, false, newMinIops, newMaxIops, null); +// +// VolumeVO newVolume; +// +// when(volumeDaoMock.findById(1L)).thenReturn(volumeVO); +// when(volumeDaoMock.getHypervisorType(1L)).thenReturn(HypervisorType.XenServer); +// when(volumeDaoMock.update(anyLong(), any(VolumeVO.class))).thenReturn(true); +// +// when(_vmSnapshotDao.findByVm(anyLong())).thenReturn(new ArrayList()); +// +// when(_diskOfferingDao.findById(1L)).thenReturn(diskOfferingVO); +// when(_diskOfferingDao.findById(2L)).thenReturn(newDiskOfferingVO); +// doNothing().when(_configMgr).checkDiskOfferingAccess(any(Account.class), any(DiskOffering.class)); +// +// try { +// newVolume = volumeApiServiceImpl.resizeVolume(resizeVolumeCmd); +// Assert.assertEquals(newMinIops, newVolume.getMinIops()); +// Assert.assertEquals(newMaxIops, newVolume.getMaxIops()); +// Assert.assertEquals(newSize, newVolume.getSize()); +// Assert.assertEquals(Volume.State.Allocated, newVolume.getState()); +// } catch (ResourceAllocationException e) { +// Assert.fail(e.getMessage()); +// } +// } +// +// @Test +// public void testResizeVolumeFromCustomSizeIopsPerGbToCustomSizeIopsPerGb() throws NoSuchFieldException, IllegalAccessException { +// Long oldSize = 10L * 1024 * 1024 * 1024; +// Long oldSizeGb = 10L; +// Long oldMinIopsPerGb = 10L; +// Long oldMaxIopsPerGb = 30L; +// +// Long newSize = 10L * 1024 * 1024 * 1024; +// Long newSizeGb = 10L; +// Long newMinIopsPerGb = 20L; +// Long newMaxIopsPerGb = 50L; +// +// ResizeVolumeCmd resizeVolumeCmd = Mockito.mock(ResizeVolumeCmd.class); +// when(resizeVolumeCmd.getNewDiskOfferingId()).thenReturn(2L); +// when(resizeVolumeCmd.getEntityId()).thenReturn(1L); +// +// VolumeVO volumeVO = new VolumeVO(Volume.Type.DATADISK, "test-vol", 1, 1, 1, 1, Storage.ProvisioningType.THIN, +// oldSize, oldSizeGb * oldMinIopsPerGb, oldSizeGb * oldMaxIopsPerGb, null); +// +// Field IdField = VolumeVO.class.getDeclaredField("id"); +// IdField.setAccessible(true); +// IdField.set(volumeVO, 1L); +// +// ReflectionTestUtils.setField(volumeApiServiceImpl, "_maxVolumeSizeInGb", 2 * 1024); +// +// DiskOfferingVO diskOfferingVO = new DiskOfferingVO(1L, "custom-size-iopspergb-old", "custom-size-iopspergb-old", Storage.ProvisioningType.THIN, +// 0L, "", true, false, null, null, null); +// diskOfferingVO.setMinIopsPerGb(oldMinIopsPerGb); +// diskOfferingVO.setMaxIopsPerGb(oldMaxIopsPerGb); +// +// DiskOfferingVO newDiskOfferingVO = new DiskOfferingVO(1L,"custom-size-iopspergb-new","custom-size-iopspergb-new", +// Storage.ProvisioningType.THIN, newSize, "", false, false, null, null, null); +// newDiskOfferingVO.setMinIopsPerGb(newMinIopsPerGb); +// newDiskOfferingVO.setMaxIopsPerGb(newMaxIopsPerGb); +// +// VolumeVO newVolume; +// +// when(volumeDaoMock.findById(1L)).thenReturn(volumeVO); +// when(volumeDaoMock.getHypervisorType(1L)).thenReturn(HypervisorType.XenServer); +// when(volumeDaoMock.update(anyLong(), any(VolumeVO.class))).thenReturn(true); +// +// when(_vmSnapshotDao.findByVm(anyLong())).thenReturn(new ArrayList()); +// +// when(_diskOfferingDao.findById(1L)).thenReturn(diskOfferingVO); +// when(_diskOfferingDao.findById(2L)).thenReturn(newDiskOfferingVO); +// doNothing().when(_configMgr).checkDiskOfferingAccess(any(Account.class), any(DiskOffering.class)); +// +// try { +// newVolume = volumeApiServiceImpl.resizeVolume(resizeVolumeCmd); +// Assert.assertEquals(new Long(newMinIopsPerGb * newSizeGb), newVolume.getMinIops()); +// Assert.assertEquals(new Long(newMaxIopsPerGb * newSizeGb), newVolume.getMaxIops()); +// Assert.assertEquals(newSize, newVolume.getSize()); +// Assert.assertEquals(Volume.State.Allocated, newVolume.getState()); +// } catch (ResourceAllocationException e) { +// Assert.fail(e.getMessage()); +// } +// } + @After public void tearDown() { CallContext.unregister(); diff --git a/server/src/test/java/com/cloud/vpc/VpcApiUnitTest.java b/server/src/test/java/com/cloud/vpc/VpcApiUnitTest.java index 8899a045329e..7f09003db926 100644 --- a/server/src/test/java/com/cloud/vpc/VpcApiUnitTest.java +++ b/server/src/test/java/com/cloud/vpc/VpcApiUnitTest.java @@ -42,7 +42,7 @@ public class VpcApiUnitTest extends TestCase { @Inject VpcManagerImpl _vpcService = null; - VpcVO _vo = new VpcVO(1, "new vpc", "new vpc", 1, 1, 1, "0.0.0.0/0", "vpc domain", false, false, false); + VpcVO _vo = new VpcVO(1, "new vpc", "new vpc", 1, 1, 1, "0.0.0.0/0", "vpc domain", false, false, false, "0.0.0.0"); @Override @Before diff --git a/server/src/test/java/com/cloud/vpc/dao/MockVpcDaoImpl.java b/server/src/test/java/com/cloud/vpc/dao/MockVpcDaoImpl.java index f20de036f4c6..515bbac50287 100644 --- a/server/src/test/java/com/cloud/vpc/dao/MockVpcDaoImpl.java +++ b/server/src/test/java/com/cloud/vpc/dao/MockVpcDaoImpl.java @@ -96,9 +96,9 @@ public void persistVpcServiceProviders(long vpcId, Map> ser public VpcVO findById(Long id) { VpcVO vo = null; if (id.longValue() == 1) { - vo = new VpcVO(1, "new vpc", "new vpc", 1, 1, 1, "0.0.0.0/0", "vpc domain", false, false, false); + vo = new VpcVO(1, "new vpc", "new vpc", 1, 1, 1, "0.0.0.0/0", "vpc domain", false, false, false, "0.0.0.0"); } else if (id.longValue() == 2) { - vo = new VpcVO(1, "new vpc", "new vpc", 1, 1, 1, "0.0.0.0/0", "vpc domain", false, false, false); + vo = new VpcVO(1, "new vpc", "new vpc", 1, 1, 1, "0.0.0.0/0", "vpc domain", false, false, false, "0.0.0.0"); vo.setState(State.Inactive); } diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index 231eacc568a2..8654d37659c5 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -42,6 +42,8 @@ import java.net.UnknownHostException; import java.nio.file.Files; import java.nio.file.Path; +import java.security.KeyManagementException; +import java.security.KeyStoreException; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Arrays; @@ -51,6 +53,7 @@ import java.util.UUID; import javax.naming.ConfigurationException; +import javax.net.ssl.SSLContext; import org.apache.cloudstack.framework.security.keystore.KeystoreManager; import org.apache.cloudstack.storage.NfsMountManagerImpl.PathParser; @@ -82,10 +85,13 @@ import org.apache.http.HttpEntity; import org.apache.http.HttpResponse; import org.apache.http.NameValuePair; -import org.apache.http.client.HttpClient; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.utils.URLEncodedUtils; -import org.apache.http.impl.client.DefaultHttpClient; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.TrustStrategy; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.ssl.SSLContextBuilder; import org.apache.log4j.Logger; import org.joda.time.DateTime; import org.joda.time.format.ISODateTimeFormat; @@ -277,6 +283,11 @@ public Answer executeRequest(Command cmd) { } else if (cmd instanceof UploadCommand) { return _upldMgr.handleUploadCommand(this, (UploadCommand)cmd); } else if (cmd instanceof CreateEntityDownloadURLCommand) { + if (((CreateEntityDownloadURLCommand) cmd).getParent() == null) { + String rootDir = getRootDir(((CreateEntityDownloadURLCommand) cmd).getSecUrl(),_nfsVersion); + String parent = rootDir.substring(rootDir.lastIndexOf("/")+1); + ((CreateEntityDownloadURLCommand) cmd).setParent(parent); + } return _upldMgr.handleCreateEntityURLCommand((CreateEntityDownloadURLCommand)cmd); } else if (cmd instanceof DeleteEntityDownloadURLCommand) { return _upldMgr.handleDeleteEntityDownloadURLCommand((DeleteEntityDownloadURLCommand)cmd); @@ -770,7 +781,12 @@ protected Answer copyFromSwiftToNfs(CopyCommand cmd, DataTO srcData, SwiftTO swi } } - File destFile = SwiftUtil.getObject(swiftTO, downloadDirectory, srcData.getPath()); + String filePath = downloadPath + File.separator + destData.getName(); + File destFile = new File(filePath); + if (!destFile.exists()) { + destFile = SwiftUtil.getObject(swiftTO, downloadDirectory, srcData.getPath()); + } + return postProcessing(destFile, downloadPath, destPath, srcData, destData); } catch (Exception e) { s_logger.debug("Failed to copy swift to nfs", e); @@ -1111,13 +1127,30 @@ private String determineStorageTemplatePath(final String storagePath, String dat } protected File downloadFromUrlToNfs(String url, NfsTO nfs, String path, String name) { - HttpClient client = new DefaultHttpClient(); - HttpGet get = new HttpGet(url); + CloseableHttpClient client = null; try { + //trust all certs + SSLContext sslContext = new SSLContextBuilder() + .loadTrustMaterial(null, (TrustStrategy) (chain, authType) -> true) + .build(); + client = HttpClients.custom().setSSLContext(sslContext) + .setSSLHostnameVerifier(new NoopHostnameVerifier()) + .build(); + } catch (NoSuchAlgorithmException e) { + e.printStackTrace(); + } catch (KeyManagementException e) { + e.printStackTrace(); + } catch (KeyStoreException e) { + e.printStackTrace(); + } + + try { + HttpGet get = new HttpGet(url); HttpResponse response = client.execute(get); HttpEntity entity = response.getEntity(); if (entity == null) { - s_logger.debug("Faled to get entity"); + s_logger.error("Failed to get entity "+ response.getStatusLine().getStatusCode() + " " + + response.getStatusLine().getReasonPhrase()); throw new CloudRuntimeException("Failed to get url: " + url); } @@ -1135,7 +1168,7 @@ protected File downloadFromUrlToNfs(String url, NfsTO nfs, String path, String n try (FileOutputStream outputStream = new FileOutputStream(destFile);) { entity.writeTo(outputStream); } catch (IOException e) { - s_logger.debug("downloadFromUrlToNfs:Exception:" + e.getMessage(), e); + s_logger.error("downloadFromUrlToNfs:Exception:" + e.getMessage(), e); } return new File(destFile.getAbsolutePath()); } catch (IOException e) { @@ -1178,13 +1211,13 @@ protected Answer registerTemplateOnSwift(DownloadCommand cmd) { try (FileInputStream fs = new FileInputStream(file)) { md5sum = DigestUtils.md5Hex(fs); } catch (IOException e) { - s_logger.debug("Failed to get md5sum: " + file.getAbsoluteFile()); + s_logger.error("Failed to get md5sum: " + file.getAbsoluteFile()); } DownloadAnswer answer = new DownloadAnswer(null, 100, null, VMTemplateStorageResourceAssoc.Status.DOWNLOADED, swiftPath, swiftPath, virtualSize, file.length(), md5sum); return answer; } catch (IOException e) { - s_logger.debug("Failed to register template into swift", e); + s_logger.error("Failed to register template into swift", e); return new DownloadAnswer(e.toString(), VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR); } finally { if (file != null) { @@ -1197,8 +1230,10 @@ private Answer execute(DownloadCommand cmd) { DataStoreTO dstore = cmd.getDataStore(); if (dstore instanceof NfsTO || dstore instanceof S3TO) { return _dlMgr.handleDownloadCommand(this, cmd); - } else if (dstore instanceof SwiftTO) { + } else if (dstore instanceof SwiftTO && cmd.getResourceType() == DownloadCommand.ResourceType.TEMPLATE) { return registerTemplateOnSwift(cmd); + } else if (dstore instanceof SwiftTO && cmd.getResourceType() == DownloadCommand.ResourceType.VOLUME){ + return _dlMgr.handleDownloadCommand(this, cmd); } else { return new Answer(cmd, false, "Unsupported image data store: " + dstore); } @@ -1266,7 +1301,7 @@ protected long getVirtualSize(File file, ImageFormat format) { processor.configure("template processor", params); return processor.getVirtualSize(file); } catch (Exception e) { - s_logger.warn("Failed to get virtual size of file " + file.getPath() + ", returning file size instead: ", e); + s_logger.error("Failed to get virtual size of file " + file.getPath() + ", returning file size instead: ", e); return file.length(); } @@ -1390,9 +1425,7 @@ protected Answer copyFromNfsToS3(CopyCommand cmd) { try { final String templatePath = determineStorageTemplatePath(srcStore.getUrl(), srcData.getPath(), _nfsVersion); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found " + srcData.getObjectType() + " from directory " + templatePath + " to upload to S3."); - } + s_logger.info("Found " + srcData.getObjectType() + " from directory " + templatePath + " to upload to S3."); final String bucket = s3.getBucketName(); File srcFile = findFile(templatePath); @@ -1552,7 +1585,7 @@ String swiftDownload(SwiftTO swift, String container, String rfilename, String l String result = command.execute(parser); if (result != null) { String errMsg = "swiftDownload failed err=" + result; - s_logger.warn(errMsg); + s_logger.error(errMsg); return errMsg; } if (parser.getLines() != null) { @@ -1560,7 +1593,7 @@ String swiftDownload(SwiftTO swift, String container, String rfilename, String l for (String line : lines) { if (line.contains("Errno") || line.contains("failed")) { String errMsg = "swiftDownload failed , err=" + parser.getLines(); - s_logger.warn(errMsg); + s_logger.error(errMsg); return errMsg; } } @@ -1578,7 +1611,7 @@ String swiftDownloadContainer(SwiftTO swift, String container, String ldir) { String result = command.execute(parser); if (result != null) { String errMsg = "swiftDownloadContainer failed err=" + result; - s_logger.warn(errMsg); + s_logger.error(errMsg); return errMsg; } if (parser.getLines() != null) { @@ -1586,7 +1619,7 @@ String swiftDownloadContainer(SwiftTO swift, String container, String ldir) { for (String line : lines) { if (line.contains("Errno") || line.contains("failed")) { String errMsg = "swiftDownloadContainer failed , err=" + parser.getLines(); - s_logger.warn(errMsg); + s_logger.error(errMsg); return errMsg; } } @@ -1629,7 +1662,7 @@ String swiftUpload(SwiftTO swift, String container, String lDir, String lFilenam String result = command.execute(parser); if (result != null) { String errMsg = "swiftUpload failed , err=" + result; - s_logger.warn(errMsg); + s_logger.error(errMsg); return errMsg; } if (parser.getLines() != null) { @@ -1637,7 +1670,7 @@ String swiftUpload(SwiftTO swift, String container, String lDir, String lFilenam for (String line : lines) { if (line.contains("Errno") || line.contains("failed")) { String errMsg = "swiftUpload failed , err=" + parser.getLines(); - s_logger.warn(errMsg); + s_logger.error(errMsg); return errMsg; } } @@ -1660,10 +1693,10 @@ String[] swiftList(SwiftTO swift, String container, String rFilename) { } else { if (result != null) { String errMsg = "swiftList failed , err=" + result; - s_logger.warn(errMsg); + s_logger.error(errMsg); } else { String errMsg = "swiftList failed, no lines returns"; - s_logger.warn(errMsg); + s_logger.error(errMsg); } } return null; @@ -1678,7 +1711,7 @@ String swiftDelete(SwiftTO swift, String container, String object) { String result = command.execute(parser); if (result != null) { String errMsg = "swiftDelete failed , err=" + result; - s_logger.warn(errMsg); + s_logger.error(errMsg); return errMsg; } if (parser.getLines() != null) { @@ -1686,7 +1719,7 @@ String swiftDelete(SwiftTO swift, String container, String object) { for (String line : lines) { if (line.contains("Errno") || line.contains("failed")) { String errMsg = "swiftDelete failed , err=" + parser.getLines(); - s_logger.warn(errMsg); + s_logger.error(errMsg); return errMsg; } } @@ -1713,7 +1746,7 @@ public Answer execute(DeleteSnapshotsDirCommand cmd) { String details = null; if (!snapshotDir.exists()) { details = "snapshot directory " + snapshotDir.getName() + " doesn't exist"; - s_logger.debug(details); + s_logger.info(details); return new Answer(cmd, true, details); } // delete all files in the directory @@ -1721,13 +1754,13 @@ public Answer execute(DeleteSnapshotsDirCommand cmd) { String result = deleteLocalFile(lPath); if (result != null) { String errMsg = "failed to delete all snapshots " + lPath + " , err=" + result; - s_logger.warn(errMsg); + s_logger.error(errMsg); return new Answer(cmd, false, errMsg); } // delete the directory if (!snapshotDir.delete()) { details = "Unable to delete directory " + snapshotDir.getName() + " under snapshot path " + relativeSnapshotPath; - s_logger.debug(details); + s_logger.error(details); return new Answer(cmd, false, details); } return new Answer(cmd, true, null); @@ -1759,7 +1792,7 @@ public Answer execute(DeleteSnapshotsDirCommand cmd) { String result = swiftDelete((SwiftTO)dstore, "V-" + volumeId.toString(), ""); if (result != null) { String errMsg = "failed to delete snapshot for volume " + volumeId + " , err=" + result; - s_logger.warn(errMsg); + s_logger.error(errMsg); return new Answer(cmd, false, errMsg); } return new Answer(cmd, true, "Deleted snapshot " + path + " from swift"); @@ -1941,7 +1974,7 @@ public void run() { ch.closeFuture().sync(); } catch (InterruptedException e) { s_logger.info("Failed to start post upload server"); - s_logger.debug("Exception while starting post upload server", e); + s_logger.error("Exception while starting post upload server", e); } finally { bossGroup.shutdownGracefully(); workerGroup.shutdownGracefully(); diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManager.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManager.java index 48093f2cf1fe..70907146f736 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManager.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManager.java @@ -18,19 +18,19 @@ import java.util.Map; -import com.cloud.storage.template.Processor; import org.apache.cloudstack.storage.command.DownloadCommand; import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType; import org.apache.cloudstack.storage.resource.SecondaryStorageResource; import com.cloud.agent.api.storage.DownloadAnswer; -import com.cloud.utils.net.Proxy; import com.cloud.agent.api.to.S3TO; import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; +import com.cloud.storage.template.Processor; import com.cloud.storage.template.TemplateDownloader; import com.cloud.storage.template.TemplateProp; -import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.utils.component.Manager; +import com.cloud.utils.net.Proxy; public interface DownloadManager extends Manager { @@ -41,8 +41,8 @@ public interface DownloadManager extends Manager { * @param hvm whether the template is a hardware virtual machine * @param accountId the accountId of the iso owner (null if public iso) * @param descr description of the template - * @param user username used for authentication to the server - * @param password password used for authentication to the server + * @param userName username used for authentication to the server + * @param passwd password used for authentication to the server * @param maxDownloadSizeInBytes (optional) max download size for the template, in bytes. * @param resourceType signifying the type of resource like template, volume etc. * @return job-id that can be used to interrogate the status of the download. @@ -53,6 +53,8 @@ public String downloadPublicTemplate(long id, String url, String name, ImageForm public String downloadS3Template(S3TO s3, long id, String url, String name, ImageFormat format, boolean hvm, Long accountId, String descr, String cksum, String installPathPrefix, String user, String password, long maxTemplateSizeInBytes, Proxy proxy, ResourceType resourceType); + public String downloadSwiftVolume(DownloadCommand cmd, String installPathPrefix, long maxDownloadSizeInBytes); + Map getProcessors(); /** diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java index e06d7da210db..20fb5ef4037b 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/DownloadManagerImpl.java @@ -16,6 +16,8 @@ // under the License. package org.apache.cloudstack.storage.template; +import static com.cloud.utils.NumbersUtil.toHumanReadableSize; + import java.io.File; import java.io.FileInputStream; import java.io.IOException; @@ -37,55 +39,55 @@ import javax.naming.ConfigurationException; -import com.cloud.agent.api.to.OVFInformationTO; -import com.cloud.storage.template.Processor; -import com.cloud.storage.template.S3TemplateDownloader; -import com.cloud.storage.template.TemplateDownloader; -import com.cloud.storage.template.TemplateLocation; -import com.cloud.storage.template.MetalinkTemplateDownloader; -import com.cloud.storage.template.HttpTemplateDownloader; -import com.cloud.storage.template.LocalTemplateDownloader; -import com.cloud.storage.template.ScpTemplateDownloader; -import com.cloud.storage.template.TemplateProp; -import com.cloud.storage.template.OVAProcessor; -import com.cloud.storage.template.IsoProcessor; -import com.cloud.storage.template.QCOW2Processor; -import com.cloud.storage.template.VmdkProcessor; -import com.cloud.storage.template.RawImageProcessor; -import com.cloud.storage.template.TARProcessor; -import com.cloud.storage.template.VhdProcessor; -import com.cloud.storage.template.TemplateConstants; +import org.apache.cloudstack.storage.NfsMountManagerImpl.PathParser; import org.apache.cloudstack.storage.command.DownloadCommand; import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType; import org.apache.cloudstack.storage.command.DownloadProgressCommand; import org.apache.cloudstack.storage.command.DownloadProgressCommand.RequestType; -import org.apache.cloudstack.storage.NfsMountManagerImpl.PathParser; import org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource; import org.apache.cloudstack.storage.resource.SecondaryStorageResource; +import org.apache.cloudstack.utils.security.ChecksumValue; +import org.apache.cloudstack.utils.security.DigestHelper; +import org.apache.commons.lang3.StringUtils; import org.apache.log4j.Logger; import com.cloud.agent.api.storage.DownloadAnswer; -import com.cloud.utils.net.Proxy; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.NfsTO; +import com.cloud.agent.api.to.OVFInformationTO; import com.cloud.agent.api.to.S3TO; +import com.cloud.agent.api.to.SwiftTO; import com.cloud.exception.InternalErrorException; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageLayer; import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.cloud.storage.template.HttpTemplateDownloader; +import com.cloud.storage.template.IsoProcessor; +import com.cloud.storage.template.LocalTemplateDownloader; +import com.cloud.storage.template.MetalinkTemplateDownloader; +import com.cloud.storage.template.OVAProcessor; +import com.cloud.storage.template.Processor; import com.cloud.storage.template.Processor.FormatInfo; +import com.cloud.storage.template.QCOW2Processor; +import com.cloud.storage.template.RawImageProcessor; +import com.cloud.storage.template.S3TemplateDownloader; +import com.cloud.storage.template.ScpTemplateDownloader; +import com.cloud.storage.template.SwiftVolumeDownloader; +import com.cloud.storage.template.TARProcessor; +import com.cloud.storage.template.TemplateConstants; +import com.cloud.storage.template.TemplateDownloader; import com.cloud.storage.template.TemplateDownloader.DownloadCompleteCallback; import com.cloud.storage.template.TemplateDownloader.Status; +import com.cloud.storage.template.TemplateLocation; +import com.cloud.storage.template.TemplateProp; +import com.cloud.storage.template.VhdProcessor; +import com.cloud.storage.template.VmdkProcessor; import com.cloud.utils.NumbersUtil; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.net.Proxy; import com.cloud.utils.script.Script; import com.cloud.utils.storage.QCOW2Utils; -import org.apache.cloudstack.utils.security.ChecksumValue; -import org.apache.cloudstack.utils.security.DigestHelper; -import org.apache.commons.lang3.StringUtils; - -import static com.cloud.utils.NumbersUtil.toHumanReadableSize; public class DownloadManagerImpl extends ManagerBase implements DownloadManager { private String _name; @@ -142,6 +144,19 @@ public DownloadJob(TemplateDownloader td, String jobId, long id, String tmpltNam this.resourceType = resourceType; } + public DownloadJob(TemplateDownloader td, String jobId, long id, String tmpltName, ImageFormat format, String installPathPrefix) { + super(); + this.td = td; + this.tmpltName = tmpltName; + this.format = format; + this.hvm = false; + this.description = null; + this.installPathPrefix = installPathPrefix; + this.templatesize = 0; + this.id = id; + this.resourceType = null; + } + public String getDescription() { return description; } @@ -304,8 +319,27 @@ public void setDownloadStatus(String jobId, Status status) { td.setStatus(Status.POST_DOWNLOAD_FINISHED); td.setDownloadError("Install completed successfully at " + new SimpleDateFormat().format(new Date())); } - } - else { + } else if (td instanceof SwiftVolumeDownloader) { + dj.setCheckSum(((SwiftVolumeDownloader) td).getMd5sum()); + if ("vhd".equals(((SwiftVolumeDownloader) td).getFileExtension()) || + "VHD".equals(((SwiftVolumeDownloader) td).getFileExtension())) { + Processor vhdProcessor = _processors.get("VHD Processor"); + long virtualSize = 0; + try { + virtualSize = vhdProcessor.getVirtualSize(((SwiftVolumeDownloader) td).getVolumeFile()); + dj.setTemplatesize(virtualSize); + } catch (IOException e) { + LOGGER.error("Unable to read VHD file", e); + e.printStackTrace(); + } + } else { + dj.setTemplatesize(((SwiftVolumeDownloader) td).getDownloadedBytes()); + } + dj.setTemplatePhysicalSize(((SwiftVolumeDownloader) td).getDownloadedBytes()); + dj.setTmpltPath(((SwiftVolumeDownloader) td).getDownloadLocalPath()); + td.setStatus(Status.POST_DOWNLOAD_FINISHED); + td.setDownloadError("Volume downloaded to swift cache successfully at " + new SimpleDateFormat().format(new Date())); + } else { // For other TemplateDownloaders where files are locally available, // we run the postLocalDownload() method. td.setDownloadError("Download success, starting install "); @@ -550,7 +584,7 @@ public Status getDownloadStatus(String jobId) { @Override public String downloadS3Template(S3TO s3, long id, String url, String name, ImageFormat format, boolean hvm, Long accountId, String descr, String cksum, - String installPathPrefix, String user, String password, long maxTemplateSizeInBytes, Proxy proxy, ResourceType resourceType) { + String installPathPrefix, String user, String password, long maxTemplateSizeInBytes, Proxy proxy, ResourceType resourceType) { UUID uuid = UUID.randomUUID(); String jobId = uuid.toString(); @@ -578,6 +612,36 @@ public String downloadS3Template(S3TO s3, long id, String url, String name, Imag return jobId; } + @Override + public String downloadSwiftVolume(DownloadCommand cmd, String installPathPrefix, long maxDownloadSizeInBytes) { + UUID uuid = UUID.randomUUID(); + String jobId = uuid.toString(); + //TODO get from global config + long maxVolumeSizeInBytes = maxDownloadSizeInBytes; + URI uri = null; + try { + uri = new URI(cmd.getUrl()); + } catch (URISyntaxException e) { + throw new CloudRuntimeException("URI is incorrect: " + cmd.getUrl()); + } + TemplateDownloader td; + if ((uri != null) && (uri.getScheme() != null)) { + if (uri.getScheme().equalsIgnoreCase("http") || uri.getScheme().equalsIgnoreCase("https")) { + td = new SwiftVolumeDownloader(cmd, new Completion(jobId), maxVolumeSizeInBytes, installPathPrefix); + } else { + throw new CloudRuntimeException("Scheme is not supported " + cmd.getUrl()); + } + } else { + throw new CloudRuntimeException("Unable to download from URL: " + cmd.getUrl()); + } + DownloadJob dj = new DownloadJob(td, jobId, cmd.getId(), cmd.getName(), cmd.getFormat(), cmd.getInstallPath()); + dj.setTmpltPath(installPathPrefix); + jobs.put(jobId, dj); + threadPool.execute(td); + + return jobId; + } + @Override public String downloadPublicTemplate(long id, String url, String name, ImageFormat format, boolean hvm, Long accountId, String descr, String cksum, String installPathPrefix, String templatePath, String user, String password, long maxTemplateSizeInBytes, Proxy proxy, ResourceType resourceType) { @@ -734,7 +798,7 @@ public DownloadAnswer handleDownloadCommand(SecondaryStorageResource resource, D this._processTimeout = timeout; ResourceType resourceType = cmd.getResourceType(); if (cmd instanceof DownloadProgressCommand) { - return handleDownloadProgressCmd(resource, (DownloadProgressCommand)cmd); + return handleDownloadProgressCmd(resource, (DownloadProgressCommand) cmd); } if (cmd.getUrl() == null) { @@ -746,7 +810,7 @@ public DownloadAnswer handleDownloadCommand(SecondaryStorageResource resource, D return new DownloadAnswer("Invalid Name", VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR); } - if(! DigestHelper.isAlgorithmSupported(cmd.getChecksum())) { + if (cmd.getChecksum() != null && !DigestHelper.isAlgorithmSupported(cmd.getChecksum())) { return new DownloadAnswer("invalid algorithm: " + cmd.getChecksum(), VMTemplateStorageResourceAssoc.Status.NOT_DOWNLOADED); } @@ -754,7 +818,9 @@ public DownloadAnswer handleDownloadCommand(SecondaryStorageResource resource, D String installPathPrefix = cmd.getInstallPath(); // for NFS, we need to get mounted path if (dstore instanceof NfsTO) { - installPathPrefix = resource.getRootDir(((NfsTO)dstore).getUrl(), _nfsVersion) + File.separator + installPathPrefix; + installPathPrefix = resource.getRootDir(((NfsTO) dstore).getUrl(), _nfsVersion) + File.separator + installPathPrefix; + } else if (dstore instanceof SwiftTO) { + installPathPrefix = resource.getRootDir(cmd.getCacheStore().getUrl(),_nfsVersion); } String user = null; String password = null; @@ -768,8 +834,10 @@ public DownloadAnswer handleDownloadCommand(SecondaryStorageResource resource, D String jobId = null; if (dstore instanceof S3TO) { jobId = - downloadS3Template((S3TO)dstore, cmd.getId(), cmd.getUrl(), cmd.getName(), cmd.getFormat(), cmd.isHvm(), cmd.getAccountId(), cmd.getDescription(), + downloadS3Template((S3TO) dstore, cmd.getId(), cmd.getUrl(), cmd.getName(), cmd.getFormat(), cmd.isHvm(), cmd.getAccountId(), cmd.getDescription(), cmd.getChecksum(), installPathPrefix, user, password, maxDownloadSizeInBytes, cmd.getProxy(), resourceType); + } else if (dstore instanceof SwiftTO) { + jobId = downloadSwiftVolume(cmd, installPathPrefix, maxDownloadSizeInBytes); } else { jobId = downloadPublicTemplate(cmd.getId(), cmd.getUrl(), cmd.getName(), cmd.getFormat(), cmd.isHvm(), cmd.getAccountId(), cmd.getDescription(), diff --git a/systemvm/debian/etc/haproxy/haproxy.cfg b/systemvm/debian/etc/haproxy/haproxy.cfg index 63da1ea694dd..1bde4f9e56c0 100644 --- a/systemvm/debian/etc/haproxy/haproxy.cfg +++ b/systemvm/debian/etc/haproxy/haproxy.cfg @@ -23,5 +23,5 @@ defaults listen cloud-default - bind 0.0.0.0:35999 + bind 127.0.0.1:35999 option transparent diff --git a/systemvm/debian/etc/ipsec.d/ikev2.conf b/systemvm/debian/etc/ipsec.d/ikev2.conf new file mode 100644 index 000000000000..9c389cf37646 --- /dev/null +++ b/systemvm/debian/etc/ipsec.d/ikev2.conf @@ -0,0 +1,26 @@ +#ipsec remote access vpn with IKEv2 configuration +config setup + plutostart=no + +conn IKEv2-Remote + dpdaction=clear + rekey=no + reauth=no + keyexchange=ikev2 + + leftauth=pubkey + left=172.26.10.151 + leftid=172.26.10.151 + leftcert=server.cert.pem + leftsendcert=always + leftsubnet=10.153.252.0/24,10.153.253.0/24 + leftfirewall=yes + + right=%any + rightsourceip=10.1.2.0/24 + rightauth=eap-mschapv2 + rightsendcert=never # see note + + eap_identity=%any + + auto=add \ No newline at end of file diff --git a/systemvm/debian/etc/iptables/iptables-dhcpsrvr b/systemvm/debian/etc/iptables/iptables-dhcpsrvr index b95e296d6b0a..6d8887f2b10e 100644 --- a/systemvm/debian/etc/iptables/iptables-dhcpsrvr +++ b/systemvm/debian/etc/iptables/iptables-dhcpsrvr @@ -36,6 +36,7 @@ COMMIT -A INPUT -i eth0 -p udp -m udp --dport 67 -j ACCEPT -A INPUT -i eth0 -p udp -m udp --dport 53 -j ACCEPT -A INPUT -i eth0 -p tcp -m tcp --dport 53 -j ACCEPT +-A INPUT -i eth0 -p tcp -m tcp --dport 8080 -j ACCEPT -A INPUT -i eth1 -p tcp -m tcp -m state --state NEW,ESTABLISHED --dport 3922 -j ACCEPT -A FORWARD -i eth0 -o eth1 -m state --state RELATED,ESTABLISHED -j ACCEPT -A FORWARD -i eth2 -o eth0 -m state --state RELATED,ESTABLISHED -j ACCEPT diff --git a/systemvm/debian/etc/iptables/iptables-router b/systemvm/debian/etc/iptables/iptables-router index 9851ee7dbd96..0c1b10e84a54 100644 --- a/systemvm/debian/etc/iptables/iptables-router +++ b/systemvm/debian/etc/iptables/iptables-router @@ -31,7 +31,9 @@ COMMIT -A INPUT -i eth0 -m state --state RELATED,ESTABLISHED -j ACCEPT -A INPUT -i eth1 -m state --state RELATED,ESTABLISHED -j ACCEPT -A INPUT -i eth2 -m state --state RELATED,ESTABLISHED -j ACCEPT --A INPUT -p icmp -j ACCEPT +-A INPUT -p icmp -m icmp --icmp-type 0 -j ACCEPT +-A INPUT -p icmp -m icmp --icmp-type 8 -j ACCEPT +-A INPUT -m u32 --u32 "6&0xFF=1 && 17&0xFF=68" -j DROP -A INPUT -i lo -j ACCEPT -A INPUT -i eth0 -p udp -m udp --dport 67 -j ACCEPT -A INPUT -i eth0 -p udp -m udp --dport 53 -j ACCEPT diff --git a/systemvm/debian/etc/iptables/iptables-vpcrouter b/systemvm/debian/etc/iptables/iptables-vpcrouter index e6237c5a1cd8..53d03eb2c4ed 100644 --- a/systemvm/debian/etc/iptables/iptables-vpcrouter +++ b/systemvm/debian/etc/iptables/iptables-vpcrouter @@ -26,7 +26,9 @@ COMMIT :OUTPUT ACCEPT [0:0] -A INPUT -d 224.0.0.18/32 -j ACCEPT -A INPUT -d 225.0.0.50/32 -j ACCEPT --A INPUT -p icmp -j ACCEPT +-A INPUT -p icmp -m icmp --icmp-type 0 -j ACCEPT +-A INPUT -p icmp -m icmp --icmp-type 8 -j ACCEPT +-A INPUT -m u32 --u32 "6&0xFF=1 && 17&0xFF=68" -j DROP -A INPUT -i lo -j ACCEPT -A INPUT -i eth0 -p tcp -m tcp -m state --state NEW,ESTABLISHED --dport 3922 -j ACCEPT -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT diff --git a/systemvm/debian/etc/logrotate.d/haproxy b/systemvm/debian/etc/logrotate.d/haproxy index 464209791a36..a6d4f7ebed33 100644 --- a/systemvm/debian/etc/logrotate.d/haproxy +++ b/systemvm/debian/etc/logrotate.d/haproxy @@ -1,5 +1,5 @@ /var/log/haproxy.log { - rotate 5 + rotate 2 missingok notifempty maxsize 10M diff --git a/systemvm/debian/opt/cloud/bin/configure.py b/systemvm/debian/opt/cloud/bin/configure.py index 67e575bfb7ad..4d5dfbb8931d 100755 --- a/systemvm/debian/opt/cloud/bin/configure.py +++ b/systemvm/debian/opt/cloud/bin/configure.py @@ -895,17 +895,32 @@ def convert_sec_to_h(self, val): class CsVpnUser(CsDataBag): PPP_CHAP = '/etc/ppp/chap-secrets' + IKEV2_SECRETS = '/etc/ipsec.d/ipsec.any.secrets' def process(self): + vpn_type = self.dbag['vpn_type'] for user in self.dbag: if user == 'id': continue + elif user == 'vpn_type': + continue userconfig = self.dbag[user] if userconfig['add']: - self.add_l2tp_ipsec_user(user, userconfig) + if vpn_type == "ikev2": + self.add_ikev2_ipsec_user(user, userconfig) + elif vpn_type == "l2tp": + self.add_l2tp_ipsec_user(user, userconfig) else: - self.del_l2tp_ipsec_user(user, userconfig) + if vpn_type == "ikev2": + self.del_ikev2_ipsec_user(user, userconfig) + elif vpn_type == "l2tp": + self.del_l2tp_ipsec_user(user, userconfig) + + if vpn_type == "ikev2": + CsHelper.execute("service ipsec start") + CsHelper.execute("ipsec update") + CsHelper.execute("ipsec rereadsecrets") def add_l2tp_ipsec_user(self, user, obj): userfound = False @@ -922,6 +937,28 @@ def add_l2tp_ipsec_user(self, user, obj): file.add(userAddEntry) file.commit() + def add_ikev2_ipsec_user(self, user, obj): + userfound = False + password = obj['password'] + + rsaEntry = ": RSA server.key.pem" + userAddEntry = "%s : EAP \"%s\"" %(user,password) + logging.debug("Adding vpn user '%s'" % user) + + file = CsFile(self.IKEV2_SECRETS) + + rsafound = file.searchString(rsaEntry, '#') + if not rsafound: + file.append(rsaEntry, 0) + + userfound = file.searchString(userAddEntry, '#') + if not userfound: + logging.debug("User is not there already, so adding user") + self.del_ikev2_ipsec_user(user, obj) + file.add(userAddEntry) + + file.commit() + def del_l2tp_ipsec_user(self, user, obj): userfound = False password = obj['password'] @@ -949,6 +986,20 @@ def del_l2tp_ipsec_user(self, user, obj): logging.debug("killing process %s" % pid) CsHelper.execute('kill -9 %s' % pid) + def del_ikev2_ipsec_user(self, user, obj): + userfound = False + password = obj['password'] + userentry = "%s : EAP \"%s\"" % (user,password) + + logging.debug("Deleting the user '%s'" % user) + file = CsFile(self.IKEV2_SECRETS) + file.deleteLine(userentry) + file.commit() + + establishedid = CsHelper.execute("ipsec statusall | grep '%s' | awk '{print $1}' | sed 's/://g'" % user) + if len(establishedid) > 0: + CsHelper.execute("ipsec down %s" % establishedid[0]) + class CsRemoteAccessVpn(CsDataBag): VPNCONFDIR = "/etc/ipsec.d" @@ -958,6 +1009,14 @@ def process(self): logging.debug(self.dbag) + l2tpconffile="%s/l2tp.conf" % (self.VPNCONFDIR) + if os.path.exists(l2tpconffile): + os.rename(l2tpconffile, l2tpconffile + "-disabled") + + ikev2conffile="%s/ikev2.conf" % (self.VPNCONFDIR) + if os.path.exists(ikev2conffile): + os.rename(ikev2conffile, ikev2conffile + "-disabled") + for public_ip in self.dbag: if public_ip == "id": continue @@ -966,36 +1025,63 @@ def process(self): # Enable remote access vpn if vpnconfig['create']: logging.debug("Enabling remote access vpn on " + public_ip) + if vpnconfig["vpn_type"] == "ikev2": + CsHelper.start_if_stopped("ipsec") + self.configure_ikev2Ipsec(public_ip, self.dbag[public_ip]) + logging.debug("Remote accessvpn data bag %s", self.dbag) + self.remoteaccessvpn_iptables(public_ip, self.dbag[public_ip]) + + CsHelper.execute("systemctl start ipsec") + CsHelper.execute("ipsec update") + CsHelper.execute("ipsec rereadsecrets") + + elif vpnconfig["vpn_type"] == "l2tp": + CsHelper.start_if_stopped("ipsec") + self.configure_l2tpIpsec(public_ip, self.dbag[public_ip]) + logging.debug("Remote accessvpn data bag %s", self.dbag) + self.remoteaccessvpn_iptables(public_ip, self.dbag[public_ip]) + + CsHelper.execute("ipsec update") + CsHelper.execute("systemctl start xl2tpd") + CsHelper.execute("ipsec rereadsecrets") - CsHelper.start_if_stopped("ipsec") - self.configure_l2tpIpsec(public_ip, self.dbag[public_ip]) - logging.debug("Remote accessvpn data bag %s", self.dbag) - self.remoteaccessvpn_iptables(public_ip, self.dbag[public_ip]) - - CsHelper.execute("ipsec update") - CsHelper.execute("systemctl start xl2tpd") - CsHelper.execute("ipsec rereadsecrets") else: + #disable remote access vpn logging.debug("Disabling remote access vpn .....") - CsHelper.execute("ipsec down L2TP-PSK") - CsHelper.execute("systemctl stop xl2tpd") + if vpnconfig["vpn_type"] == "ikev2": + if not os.path.exists(ikev2conffile): + os.rename(ikev2conffile + "-disabled", ikev2conffile) + + CsHelper.execute("ipsec down IKEv2-Remote") + CsHelper.execute("systemctl stop ipsec") + + elif vpnconfig["vpn_type"] == "l2tp": + if not os.path.exists(l2tpconffile): + os.rename(l2tpconffile + "-disabled", l2tpconffile) + + CsHelper.execute("ipsec down L2TP-PSK") + CsHelper.execute("systemctl stop xl2tpd") - def configure_l2tpIpsec(self, left, obj): + + def configure_l2tpIpsec(self, left, obj): l2tpconffile = "%s/l2tp.conf" % (self.VPNCONFDIR) vpnsecretfilte = "%s/ipsec.any.secrets" % (self.VPNCONFDIR) xl2tpdconffile = "/etc/xl2tpd/xl2tpd.conf" - xl2tpoptionsfile = "/etc/ppp/options.xl2tpd" + xl2tpoptionsfile = '/etc/ppp/options.xl2tpd' + + if not os.path.exists(l2tpconffile): + os.rename(l2tpconffile + "-disabled", l2tpconffile) + file = CsFile(l2tpconffile) localip = obj['local_ip'] localcidr = obj['local_cidr'] publicIface = obj['public_interface'] iprange = obj['ip_range'] psk = obj['preshared_key'] - # Left - l2tpfile = CsFile(l2tpconffile) - l2tpfile.addeq(" left=%s" % left) - l2tpfile.commit() + #left + file.addeq(" left=%s" % left) + file.commit() secret = CsFile(vpnsecretfilte) secret.empty() @@ -1011,6 +1097,50 @@ def configure_l2tpIpsec(self, left, obj): xl2tpoptions.search("ms-dns ", "ms-dns %s" % localip) xl2tpoptions.commit() + def configure_ikev2Ipsec(self, left, obj): + ikev2conffile = "%s/ikev2.conf" % (self.VPNCONFDIR) + vpnsecretfilte = "%s/ipsec.any.secrets" % (self.VPNCONFDIR) + + cacertfilte = "%s/cacerts/ca.cert.pem" % (self.VPNCONFDIR) + servercertfilte = "%s/certs/server.cert.pem" % (self.VPNCONFDIR) + serverkeyfilte = "%s/private/server.key.pem" % (self.VPNCONFDIR) + + localip = obj['local_ip'] + localcidr = obj['local_cidr'] + publicIface = obj['public_interface'] + iprange = obj['ip_range'] + cacert = obj['ca_cert'] + servercert = obj['server_cert'] + serverkey = obj['server_key'] + + if not os.path.exists(ikev2conffile): + os.rename(ikev2conffile + "-disabled", ikev2conffile) + + # updating 'left' detail in ikev2-remote.conf + file = CsFile(ikev2conffile) + file.addeq(" left=%s" % left) + file.addeq(" leftid=%s" % left) + file.addeq(" leftsubnet=%s" % localcidr) + file.commit() + + # CA Cert + file = CsFile(cacertfilte) + file.empty() + file.addeq(cacert) + file.commit() + + # Server Cert + file = CsFile(servercertfilte) + file.empty() + file.addeq(servercert) + file.commit() + + # Server Key + file = CsFile(serverkeyfilte) + file.empty() + file.addeq(serverkey) + file.commit() + def remoteaccessvpn_iptables(self, publicip, obj): publicdev = obj['public_interface'] localcidr = obj['local_cidr'] diff --git a/systemvm/debian/opt/cloud/bin/cs/CsAddress.py b/systemvm/debian/opt/cloud/bin/cs/CsAddress.py index e676bb5aedd6..5b4ab25235ee 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsAddress.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsAddress.py @@ -444,7 +444,10 @@ def fw_router(self): self.fw.append(["filter", "", "-A INPUT -d 225.0.0.50/32 -j ACCEPT"]) self.fw.append(["filter", "", "-A INPUT -i %s -m state --state RELATED,ESTABLISHED -j ACCEPT" % self.dev]) - self.fw.append(["filter", "", "-A INPUT -p icmp -j ACCEPT"]) + self.fw.append(["filter", "front", "-A INPUT -p icmp -m icmp --icmp-type 0 -j ACCEPT"]) + self.fw.append(["filter", "front", "-A INPUT -p icmp -m icmp --icmp-type 8 -j ACCEPT"]) + self.fw.append(["filter", "front", "-A INPUT -m u32 --u32 \"6&0xFF=1 && 17&0xFF=68\" -j DROP"]) + self.fw.append(["filter", "", "-A INPUT -p icmp -j DROP"]) self.fw.append(["filter", "", "-A INPUT -i lo -j ACCEPT"]) if self.get_type() in ["guest"]: @@ -528,6 +531,21 @@ def fw_vpcrouter(self): self.fw.append(["mangle", "front", "-A PREROUTING -s %s -d %s -m state --state NEW -j MARK --set-xmark %s/0xffffffff" % (self.cl.get_vpccidr(), self.address['network'], hex(100 + int(self.dev[3:])))]) +# ======= +# self.fw.append(["", "front", "-A NETWORK_STATS_%s -i %s -d %s" % +# ("eth1", "eth1", guestNetworkCidr)]) +# self.fw.append(["", "front", "-A NETWORK_STATS_%s -o %s -s %s" % +# ("eth1", "eth1", guestNetworkCidr)]) +# +# # Add rules for network traffic we want to track separately +# vpccidr = self.config.cmdline().get_vpccidr() +# for whitelistcidr in self.config.cmdline().get_vpcusagewhitelist(): +# self.fw.append(["", "", "-A NETWORK_STATS_%s -o %s -d %s -s %s -m comment --comment whitelist" % +# ("eth1", "eth1", whitelistcidr, vpccidr)]) +# self.fw.append(["", "", "-A NETWORK_STATS_%s -i %s -s %s -d %s -m comment --comment whitelist" % +# ("eth1", "eth1", whitelistcidr, vpccidr)]) +# +# >>>>>>> ak-ht-rebase-4.13 if self.address["source_nat"]: self.fw.append(["nat", "front", "-A POSTROUTING -o %s -j SNAT --to-source %s" % @@ -572,7 +590,10 @@ def fw_vpcrouter(self): self.fw.append(["filter", "", "-A INPUT -d 224.0.0.18/32 -j ACCEPT"]) self.fw.append(["filter", "", "-A INPUT -d 225.0.0.50/32 -j ACCEPT"]) - self.fw.append(["filter", "", "-A INPUT -p icmp -j ACCEPT"]) + self.fw.append(["filter", "front", "-A INPUT -p icmp -m icmp --icmp-type 0 -j ACCEPT"]) + self.fw.append(["filter", "front", "-A INPUT -p icmp -m icmp --icmp-type 8 -j ACCEPT"]) + self.fw.append(["filter", "front", "-A INPUT -m u32 --u32 \"6&0xFF=1 && 17&0xFF=68\" -j DROP"]) + self.fw.append(["filter", "", "-A INPUT -p icmp -j DROP"]) self.fw.append(["filter", "", "-A INPUT -i lo -j ACCEPT"]) self.fw.append(["filter", "", "-A INPUT -i eth0 -p tcp -m tcp --dport 3922 -m state --state NEW,ESTABLISHED -j ACCEPT"]) diff --git a/systemvm/debian/opt/cloud/bin/cs/CsDatabag.py b/systemvm/debian/opt/cloud/bin/cs/CsDatabag.py index c000611af48a..529e15d96bb8 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsDatabag.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsDatabag.py @@ -107,6 +107,12 @@ def get_vpccidr(self): else: return "unknown" + def get_vpcusagewhitelist(self): + if "vpcusagewhitelist" in self.idata(): + return self.idata()['vpcusagewhitelist'].split(",") + else: + return [] + def get_eth2_ip(self): if "eth2ip" in self.idata(): return self.idata()['eth2ip'] diff --git a/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py b/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py index bfa17404697b..fa5f035b5158 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py @@ -17,6 +17,7 @@ import CsHelper import logging import os +import re from netaddr import * from random import randint from CsGuestNetwork import CsGuestNetwork @@ -28,7 +29,6 @@ DHCP_OPTS = "/etc/dhcpopts.txt" CLOUD_CONF = "/etc/dnsmasq.d/cloud.conf" - class CsDhcp(CsDataBag): """ Manage dhcp entries """ diff --git a/systemvm/debian/opt/cloud/bin/cs/CsNetfilter.py b/systemvm/debian/opt/cloud/bin/cs/CsNetfilter.py index a034034dc8bd..512aba04b507 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsNetfilter.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsNetfilter.py @@ -327,11 +327,11 @@ def get_rule(self): return self.rule def to_str(self, delete=False): - """ Convert the rule back into aynactically correct iptables command """ + """ Convert the rule back into syntactically correct iptables command """ # Order is important - order = ['-A', '-s', '-d', '!_-d', '-i', '!_-i', '-p', '-m', '-m2', '--icmp-type', '--state', + order = ['-A', '-s', '-d', '!_-d', '-i', '!_-i', '-p', '-m', '-m2', '--u32', '--icmp-type', '--state', '--dport', '--destination-port', '-o', '!_-o', '-j', '--set-xmark', '--checksum', - '--to-source', '--to-destination', '--mark'] + '--to-source', '--to-destination', '--mark', '--comment'] str = '' for k in order: if k in self.rule.keys(): diff --git a/systemvm/debian/opt/cloud/bin/cs_forwardingrules.py b/systemvm/debian/opt/cloud/bin/cs_forwardingrules.py index 974c468e8dce..2763e73dab93 100755 --- a/systemvm/debian/opt/cloud/bin/cs_forwardingrules.py +++ b/systemvm/debian/opt/cloud/bin/cs_forwardingrules.py @@ -62,6 +62,10 @@ def merge(dbag, rules): print "removing index %s" % str(index) if not index == -1: del dbag[source_ip][index] + # If all forwarding rules have been deleted + # remove IP from databag + if dbag[source_ip] == []: + del dbag[source_ip] return dbag diff --git a/systemvm/debian/opt/cloud/bin/cs_remoteaccessvpn.py b/systemvm/debian/opt/cloud/bin/cs_remoteaccessvpn.py index dff05bd28145..72fbfba2470a 100755 --- a/systemvm/debian/opt/cloud/bin/cs_remoteaccessvpn.py +++ b/systemvm/debian/opt/cloud/bin/cs_remoteaccessvpn.py @@ -19,9 +19,5 @@ def merge(dbag, vpn): key = vpn['vpn_server_ip'] - op = vpn['create'] - if key in dbag.keys() and not op: - del(dbag[key]) - else: - dbag[key] = vpn + dbag[key] = vpn return dbag diff --git a/systemvm/debian/opt/cloud/bin/cs_vpnusers.py b/systemvm/debian/opt/cloud/bin/cs_vpnusers.py index 3bef1fec239a..3d1fe7eb379b 100755 --- a/systemvm/debian/opt/cloud/bin/cs_vpnusers.py +++ b/systemvm/debian/opt/cloud/bin/cs_vpnusers.py @@ -31,11 +31,15 @@ def merge(dbag, data): for user in dbagc.keys(): if user == 'id': continue + elif user == 'vpn_type': + continue userrec = dbagc[user] add = userrec['add'] if not add: del(dbagc[user]) + dbagc['vpn_type'] = data["vpn_type"] + for user in data['vpn_users']: username = user['user'] add = user['add'] diff --git a/systemvm/debian/opt/cloud/bin/setup/common.sh b/systemvm/debian/opt/cloud/bin/setup/common.sh index e908519c459c..c6cdb49ad95b 100755 --- a/systemvm/debian/opt/cloud/bin/setup/common.sh +++ b/systemvm/debian/opt/cloud/bin/setup/common.sh @@ -560,6 +560,7 @@ setup_dnsmasq() { else echo 0 > /var/cache/cloud/dnsmasq_managed_lease fi + } setup_sshd(){ diff --git a/systemvm/debian/opt/cloud/bin/setup/consoleproxy.sh b/systemvm/debian/opt/cloud/bin/setup/consoleproxy.sh index 8006f6bb2445..39f55382d527 100755 --- a/systemvm/debian/opt/cloud/bin/setup/consoleproxy.sh +++ b/systemvm/debian/opt/cloud/bin/setup/consoleproxy.sh @@ -37,6 +37,11 @@ setup_console_proxy() { enable_irqbalance 0 rm -f /etc/logrotate.d/cloud +#======= +# #disable and stop rpcbind service on VR +# chkconfig rpcbind off +# service rpcbind stop +#>>>>>>> ak-ht-rebase-4.13 } setup_console_proxy diff --git a/systemvm/debian/opt/cloud/bin/setup/router.sh b/systemvm/debian/opt/cloud/bin/setup/router.sh index f5fa95c7b13a..528c0db43268 100755 --- a/systemvm/debian/opt/cloud/bin/setup/router.sh +++ b/systemvm/debian/opt/cloud/bin/setup/router.sh @@ -91,6 +91,11 @@ setup_router() { # Load modules to support NAT traversal in VR modprobe nf_nat_pptp +#======= +# #disable and stop rpcbind service on VR +# chkconfig rpcbind off +# service rpcbind stop +#>>>>>>> ak-ht-rebase-4.13 } routing_svcs diff --git a/systemvm/debian/opt/cloud/bin/setup/vpcrouter.sh b/systemvm/debian/opt/cloud/bin/setup/vpcrouter.sh index bfb062188254..911b3883d47f 100755 --- a/systemvm/debian/opt/cloud/bin/setup/vpcrouter.sh +++ b/systemvm/debian/opt/cloud/bin/setup/vpcrouter.sh @@ -120,6 +120,11 @@ EOF # Load modules to support NAT traversal in VR modprobe nf_nat_pptp +#======= +# #disable and stop rpcbind service on VPC VR +# chkconfig rpcbind off +# service rpcbind stop +#>>>>>>> ak-ht-rebase-4.13 } routing_svcs diff --git a/systemvm/debian/opt/cloud/bin/vpc_netusage.sh b/systemvm/debian/opt/cloud/bin/vpc_netusage.sh index 5f5dc3ed0802..486ebf93e8c1 100755 --- a/systemvm/debian/opt/cloud/bin/vpc_netusage.sh +++ b/systemvm/debian/opt/cloud/bin/vpc_netusage.sh @@ -61,7 +61,14 @@ remove_usage_rules () { } get_usage () { - iptables -L NETWORK_STATS_$ethDev -n -v -x 2> /dev/null | awk '$1 ~ /^[0-9]+$/ { printf "%s:", $2}'; > /dev/null + all_traffic=$(iptables -w -L NETWORK_STATS_$ethDev -n -v -x 2> /dev/null | grep -v whitelist | awk '$1 ~ /^[0-9]+$/ { printf "%s:", $2}'; > /dev/null) + whitelist=$(iptables -w -L NETWORK_STATS_$ethDev -n -v -x 2> /dev/null | grep whitelist | awk '$1 ~ /^[0-9]+$/ { printf "-%s:", $2}'; > /dev/null) + echo "${all_traffic}${whitelist}," + return 0 +} + +get_usage_whitelist () { + iptables -w -L NETWORK_STATS_$ethDev -n -v -x 2> /dev/null | grep whitelist | awk '$1 ~ /^[0-9]+$/ { printf "%s:", $2}'; > /dev/null return 0 } @@ -91,8 +98,9 @@ lflag= vflag= nflag= dflag= +wflag= -while getopts 'cgndrl:v:' OPTION +while getopts 'cgndrwl:v:' OPTION do case $OPTION in c) cflag=1 @@ -107,6 +115,8 @@ do v) vflag=1 vcidr="$OPTARG" ;; + w) wflag=1 + ;; n) nflag=1 ;; d) dflag=1 @@ -136,6 +146,12 @@ then unlock_exit $? $lock $locked fi +if [ "$wflag" == "1" ] +then + get_usage_whitelist + unlock_exit $? $lock $locked +fi + if [ "$nflag" == "1" ] then #get_vpn_usage diff --git a/test/integration/component/test_deploy_vgpu_vm.py b/test/integration/component/test_deploy_vgpu_vm.py index fb99e877a51c..55c92568c3ec 100644 --- a/test/integration/component/test_deploy_vgpu_vm.py +++ b/test/integration/component/test_deploy_vgpu_vm.py @@ -70,17 +70,36 @@ def setUpClass(cls): cls.k240qgpuhosts = 0 cls.k220qgpuhosts = 0 cls.k200gpuhosts = 0 + cls.v100d_32agpuhosts = 0 + cls.v100d_8qgpuhosts = 0 + cls.v100d_4agpuhosts = 0 + cls.v100d_1bgpuhosts = 0 + cls.v100d_2qgpuhosts = 0 + cls.v100d_4qgpuhosts = 0 + cls.v100d_2agpuhosts = 0 + cls.v100d_2bgpuhosts = 0 + cls.v100d_32qgpuhosts = 0 + cls.v100d_16agpuhosts = 0 + cls.v100d_1qgpuhosts = 0 + cls.v100d_2b4gpuhosts = 0 + cls.v100d_16qgpuhosts = 0 + cls.v100d_8agpuhosts = 0 + cls.v100d_1agpuhosts = 0 cls.k1passthroughgpuhosts = 0 cls.k2passthroughgpuhosts = 0 + cls.v100passthroughgpuhosts = 0 cls.nongpuhosts = [] cls.k2hosts = 0 cls.k1hosts = 0 + cls.v100hosts = 0 cls.k100_vgpu_service_offering = [] cls.k200_vgpu_service_offering = [] + cls.v100_vgpu_service_offering = [] cls.nonvgpu_service_offering = [] cls.vm_k1_card = [] cls.vm_k2_card = [] cls.vm2_k2_card = [] + cls.vm_v100_card = [] cls.nonvgpu = [] cls.vmlifecycletest = 0 cls.vmsnapwomemory = 0 @@ -102,10 +121,11 @@ def setUpClass(cls): continue k1card = len(sshClient.execute("lspci | grep \"GRID K1\"")) k2card = len(sshClient.execute("lspci | grep \"GRID K2\"")) + v100card = len(sshClient.execute("lspci | grep \"TESLA V100\"")) cls.debug( - "k1 card and k2 card details are :%s %s " % - (k1card, k2card)) - if (k2card == 0) and (k1card == 0): + "k1, k2 and v100 card details are :%s %s %s" % + (k1card, k2card, v100card)) + if (k2card == 0) and (k1card == 0) and (v100card == 0): cls.nongpuhosts.append(ghost.ipaddress) if k2card != 0: cls.k2hosts = cls.k2hosts + 1 @@ -158,7 +178,60 @@ def setUpClass(cls): if k1passthrough != 0: cls.k1passthroughgpuhosts = cls.k1passthroughgpuhosts + \ 1 - if (cls.k2hosts == 0) and (cls.k1hosts == 0): + if v100card != 0: + cls.v100hosts = cls.v100hosts + 1 + v100d_32a = len(sshClient.execute("xe vgpu-type-list model-name=\"GRID V100D-32A\"")) + v100d_8q = len(sshClient.execute("xe vgpu-type-list model-name=\"GRID V100D-8Q\"")) + v100d_4a = len(sshClient.execute("xe vgpu-type-list model-name=\"GRID V100D-4A\"")) + v100d_1b = len(sshClient.execute("xe vgpu-type-list model-name=\"GRID V100D-1B\"")) + v100d_2q = len(sshClient.execute("xe vgpu-type-list model-name=\"GRID V100D-2Q\"")) + v100d_4q = len(sshClient.execute("xe vgpu-type-list model-name=\"GRID V100D-4Q\"")) + v100d_2a = len(sshClient.execute("xe vgpu-type-list model-name=\"GRID V100D-2A\"")) + v100d_2b = len(sshClient.execute("xe vgpu-type-list model-name=\"GRID V100D-2B\"")) + v100d_32q = len(sshClient.execute("xe vgpu-type-list model-name=\"GRID V100D-32Q\"")) + v100d_16a = len(sshClient.execute("xe vgpu-type-list model-name=\"GRID V100D-16A\"")) + v100d_1q = len(sshClient.execute("xe vgpu-type-list model-name=\"GRID V100D-1Q\"")) + v100d_2b4 = len(sshClient.execute("xe vgpu-type-list model-name=\"GRID V100D-2B4\"")) + v100d_16q = len(sshClient.execute("xe vgpu-type-list model-name=\"GRID V100D-16Q\"")) + v100d_8a = len(sshClient.execute("xe vgpu-type-list model-name=\"GRID V100D-8A\"")) + v100d_1a = len(sshClient.execute("xe vgpu-type-list model-name=\"GRID V100D-1A\"")) + v100passthrough = len(sshClient.execute("xe vgpu-type-list model-name='passthrough'")) + if ((v100d_32a == 0) and (v100d_8q == 0) and (v100d_4a == 0) and (v100d_1b == 0) and (v100d_2q == 0) and (v100d_4q == 0) and (v100d_2a == 0) and (v100d_2b == 0) and (v100d_32q == 0) and (v100d_16a == 0) and (v100d_1q == 0) and (v100d_2b4 == 0) and (v100d_16q == 0) and (v100d_8a == 0) and (v100d_1a == 0) and (v100passthrough == 0)): + continue + else: + if v100d_32a != 0: + cls.v100d_32agpuhosts = cls.v100d_32agpuhosts + 1 + if v100d_8q != 0: + cls.v100d_8qgpuhosts = cls.v100d_8qgpuhosts + 1 + if v100d_4a != 0: + cls.v100d_4agpuhosts = cls.v100d_4agpuhosts + 1 + if v100d_1b != 0: + cls.v100d_1bgpuhosts = cls.v100d_1bgpuhosts + 1 + if v100d_2q != 0: + cls.v100d_2qgpuhosts = cls.v100d_2qgpuhosts + 1 + if v100d_4q != 0: + cls.v100d_4qgpuhosts = cls.v100d_4qgpuhosts + 1 + if v100d_2a != 0: + cls.v100d_2agpuhosts = cls.v100d_2agpuhosts + 1 + if v100d_2b != 0: + cls.v100d_2bgpuhosts = cls.v100d_2bgpuhosts + 1 + if v100d_32q != 0: + cls.v100d_32qgpuhosts = cls.v100d_32qgpuhosts + 1 + if v100d_16a != 0: + cls.v100d_16agpuhosts = cls.v100d_16agpuhosts + 1 + if v100d_1q != 0: + cls.v100d_1qgpuhosts = cls.v100d_1qgpuhosts + 1 + if v100d_2b4 != 0: + cls.v100d_2b4gpuhosts = cls.v100d_2b4gpuhosts + 1 + if v100d_16q != 0: + cls.v100d_16qgpuhosts = cls.v100d_16qgpuhosts + 1 + if v100d_8a != 0: + cls.v100d_8agpuhosts = cls.v100d_8agpuhosts + 1 + if v100d_1a != 0: + cls.v100d_1agpuhosts = cls.v100d_1agpuhosts + 1 + if v100passthrough != 0: + cls.v100passthroughgpuhosts = cls.v100passthroughgpuhosts + 1 + if (cls.k2hosts == 0) and (cls.k1hosts == 0) and (cls.v100hosts == 0): raise unittest.SkipTest( "No XenServer available with GPU Drivers installed") @@ -735,7 +808,7 @@ def new_template_register(self, guestostype): def deploy_vm_lifecycle(self): """ - Create Service Offerings for Both K1 and K2 cards to be used for VM life cycle tests + Create Service Offerings for K1, K2 and V100 cards to be used for VM life cycle tests """ if(self.k1hosts != 0): @@ -778,6 +851,50 @@ def deploy_vm_lifecycle(self): except Exception as e: self.fail("Failed to create the service offering, %s" % e) + if(self.v100hosts != 0): + if (self.v100d_32a != 0): + gtype = "GRID V100D-32A" + elif (self.v100d_8q != 0): + gtype = "GRID V100D-8Q" + elif (self.v100d_4a != 0): + gtype = "GRID V100D-4A" + elif (self.v100d_1b != 0): + gtype = "GRID V100D-1B" + elif (self.v100d_2q != 0): + gtype = "GRID V100D-2Q" + elif (self.v100d_4q != 0): + gtype = "GRID V100D-4Q" + elif (self.v100d_2a != 0): + gtype = "GRID V100D-2A" + elif (self.v100d_2b != 0): + gtype = "GRID V100D-2B" + elif (self.v100d_32q != 0): + gtype = "GRID V100D-32Q" + elif (self.v100d_16a != 0): + gtype = "GRID V100D-16A" + elif (self.v100d_1q != 0): + gtype = "GRID V100D-1Q" + elif (self.v100d_2b4 != 0): + gtype = "GRID V100D-2B4" + elif (self.v100d_16q != 0): + gtype = "GRID V100D-16Q" + elif (self.v100d_8a != 0): + gtype = "GRID V100D-8A" + elif (self.v100d_1a != 0): + gtype = "GRID V100D-1A" + else: + gtype = "passthrough" + + self.testdata["vgpu"]["service_offerings"][gtype]["serviceofferingdetails"] = [ + {'pciDevice': 'Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs'}, {'vgpuType': gtype}] + try: + self.__class__.v100_vgpu_service_offering = ServiceOffering.create( + self.apiclient, + self.testdata["vgpu"]["service_offerings"][gtype] + ) + except Exception as e: + self.fail("Failed to create the service offering, %s" % e) + win8templateid = self.new_template_register("Windows 8 (64-bit)") win2012templateid = self.new_template_register( "Windows Server 2012 (64-bit)") @@ -789,7 +906,7 @@ def deploy_vm_lifecycle(self): ) """ - Create Virtual Machines for Both K1 and K2 cards to be used for VM life cycle tests + Create Virtual Machines for K1, K2 and V100 cards to be used for VM life cycle tests """ if(self.k1hosts != 0): @@ -823,6 +940,16 @@ def deploy_vm_lifecycle(self): serviceofferingid=self.k200_vgpu_service_offering.id, templateid=win7templateid ) + if(self.v100hosts != 0): + self.__class__.vm_v100_card = VirtualMachine.create( + self.apiclient, + self.testdata["virtual_machine"], + accountid=self.account.name, + zoneid=self.zone.id, + domainid=self.account.domainid, + serviceofferingid=self.v100_vgpu_service_offering.id, + templateid=win8templateid + ) self.__class__.nonvgpu = VirtualMachine.create( self.apiclient, @@ -1416,6 +1543,23 @@ def test_01_list_vgpu_host_details(self): k100 = 0 k1pass = 0 + v100d_32a = 0 + v100d_8q = 0 + v100d_4a = 0 + v100d_1b = 0 + v100d_2q = 0 + v100d_4q = 0 + v100d_2a = 0 + v100d_2b = 0 + v100d_32q = 0 + v100d_16a = 0 + v100d_1q = 0 + v100d_2b4 = 0 + v100d_16q = 0 + v100d_8a = 0 + v100d_1a = 0 + v100pass = 0 + for ggroup in hhosts: if ggroup.ipaddress not in self.nongpuhosts: for gp in ggroup.gpugroup: @@ -1441,6 +1585,40 @@ def test_01_list_vgpu_host_details(self): k100 = k100 + 1 if gptype.vgputype == "passthrough": k1pass = k1pass + 1 + if gp.gpugroupname == "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs": + for gptype in gp.vgpu: + if gptype.vgputype == "GRID V100D-32A": + v100d_32a = v100d_32a + 1 + if gptype.vgputype == "GRID V100D-8Q": + v100d_8q = v100d_8q + 1 + if gptype.vgputype == "GRID V100D-4A": + v100d_4a = v100d_4a + 1 + if gptype.vgputype == "GRID V100D-1B": + v100d_1b = v100d_1b + 1 + if gptype.vgputype == "GRID V100D-2Q": + v100d_2q = v100d_2q + 1 + if gptype.vgputype == "GRID V100D-4Q": + v100d_4q = v100d_4q + 1 + if gptype.vgputype == "GRID V100D-2A": + v100d_2a = v100d_2a + 1 + if gptype.vgputype == "GRID V100D-2B": + v100d_2b = v100d_2b + 1 + if gptype.vgputype == "GRID V100D-32Q": + v100d_32q = v100d_32q + 1 + if gptype.vgputype == "GRID V100D-16A": + v100d_16a = v100d_16a + 1 + if gptype.vgputype == "GRID V100D-1Q": + v100d_1q = v100d_1q + 1 + if gptype.vgputype == "GRID V100D-2B4": + v100d_2b4 = v100d_2b4 + 1 + if gptype.vgputype == "GRID V100D-16Q": + v100d_16q = v100d_16q + 1 + if gptype.vgputype == "GRID V100D-8A": + v100d_8a = v100d_8a + 1 + if gptype.vgputype == "GRID V100D-1A": + v100d_1a = v100d_1a + 1 + if gptype.vgputype == "passthrough": + v100pass = v100pass + 1 else: self.debug("This is nongpuhost:%s" % (ggroup.ipaddress)) if self.k260qgpuhosts > 0: @@ -1472,7 +1650,54 @@ def test_01_list_vgpu_host_details(self): if not k1pass: self.fail( "list host details with K1 Passthrough vgpu are not correct") - + if self.v100d_32agpuhosts > 0: + if not v100d_32a: + self.fail("list host details with V100D-32A vgpu are not correct") + if self.v100d_8qgpuhosts > 0: + if not v100d_8q: + self.fail("list host details with V100D-8Q vgpu are not correct") + if self.v100d_4agpuhosts > 0: + if not v100d_4a: + self.fail("list host details with V100D-4A vgpu are not correct") + if self.v100d_1bgpuhosts > 0: + if not v100d_1b: + self.fail("list host details with V100D-1B vgpu are not correct") + if self.v100d_2qgpuhosts > 0: + if not v100d_2q: + self.fail("list host details with V100D-2Q vgpu are not correct") + if self.v100d_4qgpuhosts > 0: + if not v100d_4q: + self.fail("list host details with V100D-4Q vgpu are not correct") + if self.v100d_2agpuhosts > 0: + if not v100d_2a: + self.fail("list host details with V100D-2A vgpu are not correct") + if self.v100d_2bgpuhosts > 0: + if not v100d_2b: + self.fail("list host details with V100D-2B vgpu are not correct") + if self.v100d_32qgpuhosts > 0: + if not v100d_32q: + self.fail("list host details with V100D-32Q vgpu are not correct") + if self.v100d_16agpuhosts > 0: + if not v100d_16a: + self.fail("list host details with V100D-16A vgpu are not correct") + if self.v100d_1qgpuhosts > 0: + if not v100d_1q: + self.fail("list host details with V100D-1Q vgpu are not correct") + if self.v100d_2b4gpuhosts > 0: + if not v100d_2b4: + self.fail("list host details with V100D-2B4 vgpu are not correct") + if self.v100d_16qgpuhosts > 0: + if not v100d_16q: + self.fail("list host details with V100D-16Q vgpu are not correct") + if self.v100d_8agpuhosts > 0: + if not v100d_8a: + self.fail("list host details with V100D-8A vgpu are not correct") + if self.v100d_1agpuhosts > 0: + if not v100d_1a: + self.fail("list host details with V100D-1A vgpu are not correct") + if self.v100passthroughgpuhosts > 0: + if not v100pass: + self.fail("list host details with V100 passthrough are not correct") @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") def test_02_create_deploy_windows_vm_with_k100_vgpu_service_offering(self): """Test to create and deploy vm with K100 vGPU service offering""" @@ -1751,6 +1976,8 @@ def test_12_validate_deployed_vGPU_windows_vm(self): if self.__class__.vm2_k2_card is not None: self.verify_vm(self.__class__.vm2_k2_card) + if self.__class__.vm_v100_card is not None: + self.verify_vm(self.__class__.vm_v100_card) self.__class__.vmlifecycletest = 1 return @@ -1772,6 +1999,9 @@ def test_13_stop_vGPU_windows_vm(self): if self.__class__.vm2_k2_card: self.stopvm(self.__class__.vm2_k2_card) + if self.__class__.vm_v100_card: + self.stopvm(self.__class__.vm_v100_card) + return @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") @@ -1791,6 +2021,9 @@ def test_14_start_vGPU_windows_vm(self): if self.__class__.vm2_k2_card: self.startvm(self.__class__.vm2_k2_card) + if self.__class__.vm_v100_card: + self.startvm(self.__class__.vm_v100_card) + return @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") @@ -1808,6 +2041,8 @@ def test_15_restore_vGPU_windows_vm(self): self.restorevm(self.__class__.vm_k2_card) if self.__class__.vm2_k2_card: self.restorevm(self.__class__.vm2_k2_card) + if self.__class__.vm_v100_card: + self.restorevm(self.__class__.vm_v100_card) return @@ -1826,6 +2061,8 @@ def test_16_reboot_vGPU_windows_vm(self): self.rebootvm(self.__class__.vm_k2_card) if self.__class__.vm2_k2_card: self.rebootvm(self.__class__.vm2_k2_card) + if self.__class__.vm_v100_card: + self.rebootvm(self.__class__.vm_v100_card) return @@ -2048,12 +2285,16 @@ def test_26_destroy_vGPU_windows_vm(self): if self.__class__.vm2_k2_card: self.deletevm(self.__class__.vm2_k2_card) + if self.__class__.vm_v100_card: + self.deletevm(self.__class__.vm_v100_card) + if self.__class__.nonvgpu: self.deletevm(self.__class__.nonvgpu) self.cleanup.append(self.__class__.nonvgpu_service_offerin) self.cleanup.append(self.__class__.k100_vgpu_service_offering) self.cleanup.append(self.__class__.k200_vgpu_service_offering) + self.cleanup.append(self.__class__.v100_vgpu_service_offering) return @@ -2075,6 +2316,9 @@ def test_27_recover_vGPU_windows_vm(self): if self.__class__.vm2_k2_card is not None: self.recovervm(self.__class__.vm2_k2_card) + if self.__class__.vm_v100_card is not None: + self.recovervm(self.__class__.vm_v100_card) + return def test_28_destroy_vGPU_windows_vm_after_recover(self): @@ -2100,6 +2344,11 @@ def test_28_destroy_vGPU_windows_vm_after_recover(self): raise unittest.SkipTest("VM is already deleted hence skipping") self.deletevm(self.__class__.vm2_k2_card) + if self.__class__.vm_v100_card: + if self.check_vm_state(self.__class__.vm_v100_card.id) == "Expunge": + raise unittest.SkipTest("VM is already deleted hence skipping") + self.deletevm(self.__class__.vm_v100_card) + return @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") @@ -2269,6 +2518,262 @@ def test_36_K240Q_vgpuvm_K140Q_vgpuvm_offline(self): "Group of NVIDIA Corporation GK107GL [GRID K1] GPUs") return + @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") + def test_37_create_deploy_windows_vm_with_v100d_32a_vgpu_service_offering(self): + """Test to create and deploy vm with K180Q vGPU service offering""" + + v100d_32acapacity = self.check_host_vgpu_capacity( + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs", + "GRID K180Q") + + if (self.v100d_32agpuhosts == 0) or (v100d_32acapacity == 0): + raise unittest.SkipTest( + "No XenServer available with V100D-32A vGPU Drivers installed") + + self.deploy_vm( + "GRID V100D-32A", + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs") + + @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") + def test_38_create_deploy_windows_vm_with_v100d_8q_vgpu_service_offering(self): + """Test to create and deploy vm with K180Q vGPU service offering""" + + v100d_8qcapacity = self.check_host_vgpu_capacity( + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs", + "GRID K180Q") + + if (self.v100d_8qgpuhosts == 0) or (v100d_8qcapacity == 0): + raise unittest.SkipTest( + "No XenServer available with V100D-8Q vGPU Drivers installed") + + self.deploy_vm( + "GRID V100D-8Q", + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs") + + @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") + def test_39_create_deploy_windows_vm_with_v100d_4a_vgpu_service_offering(self): + """Test to create and deploy vm with K180Q vGPU service offering""" + + v100d_4acapacity = self.check_host_vgpu_capacity( + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs", + "GRID K180Q") + + if (self.v100d_4agpuhosts == 0) or (v100d_4acapacity == 0): + raise unittest.SkipTest( + "No XenServer available with V100D-4A vGPU Drivers installed") + + self.deploy_vm( + "GRID V100D-4A", + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs") + + @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") + def test_40_create_deploy_windows_vm_with_v100d_1b_vgpu_service_offering(self): + """Test to create and deploy vm with K180Q vGPU service offering""" + + v100d_1bcapacity = self.check_host_vgpu_capacity( + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs", + "GRID K180Q") + + if (self.v100d_1bgpuhosts == 0) or (v100d_1bcapacity == 0): + raise unittest.SkipTest( + "No XenServer available with V100D-1B vGPU Drivers installed") + + self.deploy_vm( + "GRID V100D-1B", + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs") + + @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") + def test_41_create_deploy_windows_vm_with_v100d_2q_vgpu_service_offering(self): + """Test to create and deploy vm with K180Q vGPU service offering""" + + v100d_2qcapacity = self.check_host_vgpu_capacity( + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs", + "GRID K180Q") + + if (self.v100d_2qgpuhosts == 0) or (v100d_2qcapacity == 0): + raise unittest.SkipTest( + "No XenServer available with V100D-2Q vGPU Drivers installed") + + self.deploy_vm( + "GRID V100D-2Q", + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs") + + @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") + def test_42_create_deploy_windows_vm_with_v100d_4q_vgpu_service_offering(self): + """Test to create and deploy vm with K180Q vGPU service offering""" + + v100d_4qcapacity = self.check_host_vgpu_capacity( + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs", + "GRID K180Q") + + if (self.v100d_4qgpuhosts == 0) or (v100d_4qcapacity == 0): + raise unittest.SkipTest( + "No XenServer available with V100D-4Q vGPU Drivers installed") + + self.deploy_vm( + "GRID V100D-4Q", + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs") + + @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") + def test_43_create_deploy_windows_vm_with_v100d_2a_vgpu_service_offering(self): + """Test to create and deploy vm with K180Q vGPU service offering""" + + v100d_2acapacity = self.check_host_vgpu_capacity( + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs", + "GRID K180Q") + + if (self.v100d_2agpuhosts == 0) or (v100d_2acapacity == 0): + raise unittest.SkipTest( + "No XenServer available with V100D-2A vGPU Drivers installed") + + self.deploy_vm( + "GRID V100D-2A", + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs") + + @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") + def test_44_create_deploy_windows_vm_with_v100d_2b_vgpu_service_offering(self): + """Test to create and deploy vm with K180Q vGPU service offering""" + + v100d_2bcapacity = self.check_host_vgpu_capacity( + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs", + "GRID K180Q") + + if (self.v100d_2bgpuhosts == 0) or (v100d_2bcapacity == 0): + raise unittest.SkipTest( + "No XenServer available with V100D-2B vGPU Drivers installed") + + self.deploy_vm( + "GRID V100D-2B", + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs") + + @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") + def test_45_create_deploy_windows_vm_with_v100d_32q_vgpu_service_offering(self): + """Test to create and deploy vm with K180Q vGPU service offering""" + + v100d_32qcapacity = self.check_host_vgpu_capacity( + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs", + "GRID K180Q") + + if (self.v100d_32qgpuhosts == 0) or (v100d_32qcapacity == 0): + raise unittest.SkipTest( + "No XenServer available with V100D-32Q vGPU Drivers installed") + + self.deploy_vm( + "GRID V100D-32Q", + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs") + + @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") + def test_46_create_deploy_windows_vm_with_v100d_16a_vgpu_service_offering(self): + """Test to create and deploy vm with K180Q vGPU service offering""" + + v100d_16acapacity = self.check_host_vgpu_capacity( + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs", + "GRID K180Q") + + if (self.v100d_16agpuhosts == 0) or (v100d_16acapacity == 0): + raise unittest.SkipTest( + "No XenServer available with V100D-16A vGPU Drivers installed") + + self.deploy_vm( + "GRID V100D-16A", + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs") + + @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") + def test_47_create_deploy_windows_vm_with_v100d_1q_vgpu_service_offering(self): + """Test to create and deploy vm with K180Q vGPU service offering""" + + v100d_1qcapacity = self.check_host_vgpu_capacity( + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs", + "GRID K180Q") + + if (self.v100d_1qgpuhosts == 0) or (v100d_1qcapacity == 0): + raise unittest.SkipTest( + "No XenServer available with V100D-1Q vGPU Drivers installed") + + self.deploy_vm( + "GRID V100D-1Q", + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs") + + @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") + def test_48_create_deploy_windows_vm_with_v100d_2b4_vgpu_service_offering(self): + """Test to create and deploy vm with K180Q vGPU service offering""" + + v100d_2b4capacity = self.check_host_vgpu_capacity( + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs", + "GRID K180Q") + + if (self.v100d_2b4gpuhosts == 0) or (v100d_2b4capacity == 0): + raise unittest.SkipTest( + "No XenServer available with V100D-2B4 vGPU Drivers installed") + + self.deploy_vm( + "GRID V100D-2B4", + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs") + + @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") + def test_49_create_deploy_windows_vm_with_v100d_16q_vgpu_service_offering(self): + """Test to create and deploy vm with K180Q vGPU service offering""" + + v100d_16qcapacity = self.check_host_vgpu_capacity( + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs", + "GRID K180Q") + + if (self.v100d_16qgpuhosts == 0) or (v100d_16qcapacity == 0): + raise unittest.SkipTest( + "No XenServer available with V100D-16Q vGPU Drivers installed") + + self.deploy_vm( + "GRID V100D-16Q", + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs") + + @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") + def test_50_create_deploy_windows_vm_with_v100d_8a_vgpu_service_offering(self): + """Test to create and deploy vm with K180Q vGPU service offering""" + + v100d_8acapacity = self.check_host_vgpu_capacity( + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs", + "GRID K180Q") + + if (self.v100d_8agpuhosts == 0) or (v100d_8acapacity == 0): + raise unittest.SkipTest( + "No XenServer available with V100D-8A vGPU Drivers installed") + + self.deploy_vm( + "GRID V100D-8A", + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs") + + @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") + def test_51_create_deploy_windows_vm_with_v100d_1a_vgpu_service_offering(self): + """Test to create and deploy vm with K180Q vGPU service offering""" + + v100d_1acapacity = self.check_host_vgpu_capacity( + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs", + "GRID K180Q") + + if (self.v100d_1agpuhosts == 0) or (v100d_1acapacity == 0): + raise unittest.SkipTest( + "No XenServer available with V100D-1A vGPU Drivers installed") + + self.deploy_vm( + "GRID V100D-1A", + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs") + + @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") + def test_52_create_deploy_windows_vm_with_v100passthrough_vgpu_service_offering(self): + """Test to create and deploy vm with K180Q vGPU service offering""" + + v100passthroughcapacity = self.check_host_vgpu_capacity( + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs", + "GRID K180Q") + + if (self.v100passthroughgpuhosts == 0) or (v100passthroughcapacity == 0): + raise unittest.SkipTest( + "No XenServer available with passthrough vGPU Drivers installed") + + self.deploy_vm( + "passthrough", + "Group of NVIDIA Corporation GV100GL [TESLA V100] GPUs") + @classmethod def tearDownClass(self): try: diff --git a/test/integration/plugins/datera/DateraCommon.py b/test/integration/plugins/datera/DateraCommon.py new file mode 100644 index 000000000000..13a83393a912 --- /dev/null +++ b/test/integration/plugins/datera/DateraCommon.py @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/test/integration/plugins/datera/TestSnapshots.py b/test/integration/plugins/datera/TestSnapshots.py new file mode 100644 index 000000000000..de5d3c64f8d3 --- /dev/null +++ b/test/integration/plugins/datera/TestSnapshots.py @@ -0,0 +1,1240 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import logging +import unittest +import random +import XenAPI +import distutils.util + +logger = logging.getLogger('myapp') +hdlr = logging.FileHandler('/var/tmp/syed.log') +formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') +hdlr.setFormatter(formatter) +logger.addHandler(hdlr) +logger.setLevel(logging.WARNING) + + +# All tests inherit from cloudstackTestCase +from marvin.cloudstackTestCase import cloudstackTestCase + +from nose.plugins.attrib import attr + +# Import Integration Libraries + +# base - contains all resources as entities and defines create, delete, list operations on them +from marvin.lib.base import Account, DiskOffering, ServiceOffering, Snapshot, StoragePool, Template, User, \ + VirtualMachine, Volume +# common - commonly used methods for all tests are listed here +from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_virtual_machines, \ + list_volumes + +# utils - utility classes for common cleanup, external library wrappers, etc. +from marvin.lib.utils import cleanup_resources + +import dfs_sdk + + +class TestData(): + account = "account" + capacityBytes = "capacitybytes" + capacityIops = "capacityiops" + clusterId = "clusterId" + computeOffering = "computeoffering" + diskName = "diskname" + diskOffering = "diskoffering" + domainId = "domainId" + hypervisor = "hypervisor" + login = "login" + mvip = "mvip" + password = "password" + port = "port" + primaryStorage = "primarystorage" + provider = "provider" + scope = "scope" + Datera = "datera" + storageTag = "Datera_SAN_1" + tags = "tags" + templateCacheName = "centos56-x86-64-xen" # TODO + templateName = "templatename" + testAccount = "testaccount" + url = "url" + user = "user" + username = "username" + virtualMachine = "virtualmachine" + virtualMachine2 = "virtualmachine2" + volume_1 = "volume_1" + volume_2 = "volume_2" + xenServer = "xenserver" + zoneId = "zoneId" + + def __init__(self): + self.testdata = { + TestData.Datera: { + TestData.mvip: "192.168.22.100", + TestData.login: "admin", + TestData.password: "password", + TestData.port: 80, + TestData.url: "https://192.168.22.100:443" + }, + TestData.xenServer: { + TestData.username: "root", + TestData.password: "password" + }, + TestData.account: { + "email": "test@test.com", + "firstname": "John", + "lastname": "Doe", + "username": "test", + "password": "test" + }, + TestData.testAccount: { + "email": "test2@test2.com", + "firstname": "Jane", + "lastname": "Doe", + "username": "test2", + "password": "test" + }, + TestData.user: { + "email": "user@test.com", + "firstname": "Jane", + "lastname": "Doe", + "username": "testuser", + "password": "password" + }, + TestData.primaryStorage: { + "name": "Datera-%d" % random.randint(0, 100), + TestData.scope: "ZONE", + "url": "MVIP=192.168.22.100;SVIP=192.168.100.2;" + + "clusterAdminUsername=admin;clusterAdminPassword=password;" + + "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" + + "numReplicas=1;", + TestData.provider: "Datera", + TestData.tags: TestData.storageTag, + TestData.capacityIops: 4500000, + TestData.capacityBytes: 2251799813685248, + TestData.hypervisor: "Any" + }, + TestData.virtualMachine: { + "name": "TestVM", + "displayname": "Test VM" + }, + TestData.virtualMachine2: { + "name": "TestVM2", + "displayname": "Test VM 2" + }, + TestData.computeOffering: { + "name": "DT_CO_1", + "displaytext": "DT_CO_1 (Min IOPS = 10,000; Max IOPS = 15,000)", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 128, + "storagetype": "shared", + "customizediops": False, + "miniops": "10000", + "maxiops": "15000", + "hypervisorsnapshotreserve": 200, + "tags": TestData.storageTag + }, + TestData.diskOffering: { + "name": "DT_DO_1", + "displaytext": "DT_DO_1 (5GB Min IOPS = 300; Max IOPS = 500)", + "disksize": 5, + "customizediops": False, + "miniops": 300, + "maxiops": 500, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "testdiskofferings": { + "customiopsdo": { + "name": "DT_Custom_Iops_DO", + "displaytext": "Customized Iops DO", + "disksize": 5, + "customizediops": True, + "miniops": 500, + "maxiops": 1000, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "customsizedo": { + "name": "DT_Custom_Size_DO", + "displaytext": "Customized Size DO", + "disksize": 5, + "customizediops": False, + "miniops": 500, + "maxiops": 1000, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "customsizeandiopsdo": { + "name": "DT_Custom_Iops_Size_DO", + "displaytext": "Customized Size and Iops DO", + "disksize": 10, + "customizediops": True, + "miniops": 400, + "maxiops": 800, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "newiopsdo": { + "name": "DT_New_Iops_DO", + "displaytext": "New Iops (min=350, max = 700)", + "disksize": 5, + "miniops": 350, + "maxiops": 700, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "newsizedo": { + "name": "DT_New_Size_DO", + "displaytext": "New Size: 175", + "disksize": 10, + "miniops": 400, + "maxiops": 800, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + "newsizeandiopsdo": { + "name": "DT_New_Size_Iops_DO", + "displaytext": "New Size and Iops", + "disksize": 10, + "miniops": 200, + "maxiops": 400, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + } + }, + TestData.volume_1: { + TestData.diskName: "test-volume", + }, + TestData.volume_2: { + TestData.diskName: "test-volume-2", + }, + TestData.templateName: "tiny linux xenserver", # TODO + TestData.zoneId: 1, + TestData.clusterId: 1, + TestData.domainId: 1, + TestData.url: "192.168.129.50" + } + + def update(self, overrideFileName): + if os.path.exists(overrideFileName): + with open(overrideFileName) as fd: + self.testdata = self._update(self.testdata, json.loads(fd.read())) + + def _update(self, d, u): + + for k, v in u.iteritems(): + if isinstance(v, collections.Mapping): + r = self.update(d.get(k, {}), v) + d[k] = r + else: + d[k] = u[k] + return d + + +class TestSnapshots(cloudstackTestCase): + _vm_not_in_running_state_err_msg = "The VM should be in running state" + _should_be_zero_volume_access_groups_in_list_err_msg = "There shouldn't be any volume access groups in this list." + _should_be_zero_snapshots_in_list_err_msg = "There shouldn't be any snapshots in this list." + _should_only_be_one_snapshot_in_list_err_msg = "There should only be one snapshot in this list." + _should_be_two_snapshots_in_list_err_msg = "There should be two snapshots in this list." + _should_be_three_snapshots_in_list_err_msg = "There should be three snapshots in this list." + _should_be_zero_volumes_in_list_err_msg = "There shouldn't be any volumes in this list." + _should_only_be_one_volume_in_list_err_msg = "There should only be one volume in this list." + _should_be_two_volumes_in_list_err_msg = "There should be two volumes in this list." + _should_be_three_volumes_in_list_err_msg = "There should be three volumes in this list." + _should_be_four_volumes_in_list_err_msg = "There should be four volumes in this list." + _should_be_five_volumes_in_list_err_msg = "There should be five volumes in this list." + _should_be_six_volumes_in_list_err_msg = "There should be six volumes in this list." + _should_be_seven_volumes_in_list_err_msg = "There should be seven volumes in this list." + _should_be_five_items_in_list_err_msg = "There should be five items in this list." + _should_be_a_valid_volume_err = "There should be a valid backend volume" + + @classmethod + def setUpClass(cls): + # Set up API client + testclient = super(TestSnapshots, cls).getClsTestClient() + cls.apiClient = testclient.getApiClient() + cls.dbConnection = testclient.getDbConnection() + + td = TestData() + + if cls.config.TestData and cls.config.TestData.Path: + td.update(cls.config.TestData.Path) + + cls.testdata = td.testdata + + cls.supports_resign = cls._get_supports_resign() + + # Set up xenAPI connection + hosts = list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId]) + xenserver = cls.testdata[TestData.xenServer] + + for h in hosts: + host_ip = "https://" + h.ipaddress + try: + cls.xen_session = XenAPI.Session(host_ip) + cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password]) + break + except XenAPI.Failure as e: + pass + + # Set up datera connection + datera = cls.testdata[TestData.Datera] + cls.dt_client = dfs_sdk.DateraApi( + username=datera[TestData.login], + password=datera[TestData.password], + hostname=datera[TestData.mvip] + ) + + # Get Resources from Cloud Infrastructure + cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]) + cls.cluster = list_clusters(cls.apiClient)[0] + cls.template = get_template(cls.apiClient, cls.zone.id, template_name=cls.testdata[TestData.templateName]) + cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) + + # Create test account + cls.account = Account.create( + cls.apiClient, + cls.testdata["account"], + admin=1 + ) + + # Set up connection to make customized API calls + cls.user = User.create( + cls.apiClient, + cls.testdata["user"], + account=cls.account.name, + domainid=cls.domain.id + ) + + primarystorage = cls.testdata[TestData.primaryStorage] + + cls.primary_storage = StoragePool.create( + cls.apiClient, + primarystorage, + scope=primarystorage[TestData.scope], + zoneid=cls.zone.id, + provider=primarystorage[TestData.provider], + tags=primarystorage[TestData.tags], + capacityiops=primarystorage[TestData.capacityIops], + capacitybytes=primarystorage[TestData.capacityBytes], + hypervisor=primarystorage[TestData.hypervisor] + ) + + cls.compute_offering = ServiceOffering.create( + cls.apiClient, + cls.testdata[TestData.computeOffering] + ) + + cls.disk_offering = DiskOffering.create( + cls.apiClient, + cls.testdata[TestData.diskOffering] + ) + + # Resources that are to be destroyed + cls._cleanup = [ + cls.compute_offering, + cls.disk_offering, + cls.user, + cls.account + ] + + @classmethod + def tearDownClass(cls): + try: + cleanup_resources(cls.apiClient, cls._cleanup) + + cls.primary_storage.delete(cls.apiClient) + + cls._purge_datera_template_volumes() + except Exception as e: + logging.debug("Exception in tearDownClass(cls): %s" % e) + + def setUp(self): + self.attached = False + self.cleanup = [] + + def tearDown(self): + cleanup_resources(self.apiClient, self.cleanup) + + @attr(hypervisor='XenServer') + def test_01_create_native_snapshots(self): + """ + * Create a VM using the managed disk offering + * Create 3 snapshots on the root drive + * Delete each of the snapshots while checking the backend + * Create 2 more snaphsots + * Delete the VM (should not delete the volume) + * Delete the snapshots (should delete the volume) + """ + + if not self.supports_resign: + self.skipTest("Resignature not supported, skipping") + + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True + ) + + self.assertEqual( + virtual_machine.state.lower(), + "running", + TestSnapshots._vm_not_in_running_state_err_msg + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine.id, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vm_1_root_volume = list_volumes_response[0] + + dt_volume_name = self._get_app_instance_name_from_cs_volume(vm_1_root_volume) + + dt_volumes = self._get_dt_volumes() + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) + + dt_snapshots = self._get_native_snapshots_for_dt_volume(dt_volume) + + self._check_list(dt_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) + + primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage) + + vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, primary_storage_db_id, 1, + TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + + vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, primary_storage_db_id, 2, + TestSnapshots._should_be_two_snapshots_in_list_err_msg) + + vol_snap_3 = self._create_and_test_snapshot(vm_1_root_volume.id, primary_storage_db_id, 3, + TestSnapshots._should_be_three_snapshots_in_list_err_msg) + + self._delete_and_test_snapshot(vol_snap_2) + + self._delete_and_test_snapshot(vol_snap_1) + + self._delete_and_test_snapshot(vol_snap_3) + + vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, primary_storage_db_id, 1, + TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + + vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, primary_storage_db_id, 2, + TestSnapshots._should_be_two_snapshots_in_list_err_msg) + + virtual_machine.delete(self.apiClient, True) + + dt_volumes = self._get_dt_volumes() + + self._delete_and_test_snapshot(vol_snap_1) + + self._delete_and_test_snapshot(vol_snap_2, False) + + def test_02_create_template_from_native_snapshot(self): + + if not self.supports_resign: + self.skipTest("Resignature not supported, skipping") + + primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage) + + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True + ) + + self.assertEqual( + virtual_machine.state.lower(), + "running", + TestSnapshots._vm_not_in_running_state_err_msg + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine.id, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vm_1_root_volume = list_volumes_response[0] + + dt_volume = self._get_dt_volume_for_cs_volume(vm_1_root_volume) + + self.assertNotEqual( + dt_volume, + None, + TestSnapshots._should_be_a_valid_volume_err + ) + + dt_snapshots = self._get_native_snapshots_for_dt_volume(dt_volume) + + self._check_list(dt_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) + + vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, primary_storage_db_id, 1, + TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + + vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, primary_storage_db_id, 2, + TestSnapshots._should_be_two_snapshots_in_list_err_msg) + + vol_snap_3 = self._create_and_test_snapshot(vm_1_root_volume.id, primary_storage_db_id, 3, + TestSnapshots._should_be_three_snapshots_in_list_err_msg) + + services = {"displaytext": "Template-1", "name": "Template-1-name", "ostype": "CentOS 5.6 (64-bit)", + "ispublic": "true"} + + template = Template.create_from_snapshot(self.apiClient, vol_snap_2, services) + + self.cleanup.append(template) + + virtual_machine_dict = {"name": "TestVM2", "displayname": "Test VM 2"} + + virtual_machine_2 = VirtualMachine.create( + self.apiClient, + virtual_machine_dict, + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=template.id, + domainid=self.domain.id, + startvm=True + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine_2.id, + listall=True + ) + + self.assertEqual( + virtual_machine_2.state.lower(), + "running", + TestSnapshots._vm_not_in_running_state_err_msg + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vm_2_root_volume = list_volumes_response[0] + + dt_volume_2 = self._get_dt_volume_for_cs_volume(vm_2_root_volume) + + self.assertNotEqual( + dt_volume_2, + None, + TestSnapshots._should_be_a_valid_volume_err + ) + + self._delete_and_test_snapshot(vol_snap_1) + self._delete_and_test_snapshot(vol_snap_2) + self._delete_and_test_snapshot(vol_snap_3) + + virtual_machine.delete(self.apiClient, True) + virtual_machine_2.delete(self.apiClient, True) + + def test_03_create_volume_from_native_snapshot(self): + + if not self.supports_resign: + self.skipTest("Resignature not supported, skipping") + + primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage) + + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True + ) + + self.assertEqual( + virtual_machine.state.lower(), + "running", + TestSnapshots._vm_not_in_running_state_err_msg + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine.id, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vm_1_root_volume = list_volumes_response[0] + + dt_volume_1 = self._get_dt_volume_for_cs_volume(vm_1_root_volume) + + self.assertNotEqual( + dt_volume_1, + None, + TestSnapshots._should_be_a_valid_volume_err + ) + + vol_snap_a = self._create_and_test_snapshot(vm_1_root_volume.id, primary_storage_db_id, 1, + TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + + services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True} + + volume_created_from_snapshot = Volume.create_from_snapshot(self.apiClient, vol_snap_a.id, services, + account=self.account.name, domainid=self.domain.id) + + dt_snapshot_volume = self._get_dt_volume_for_cs_volume(volume_created_from_snapshot) + + self.assertNotEqual( + dt_snapshot_volume, + None, + TestSnapshots._should_be_a_valid_volume_err + ) + + volume_created_from_snapshot = virtual_machine.attach_volume( + self.apiClient, + volume_created_from_snapshot + ) + + self._delete_and_test_snapshot(vol_snap_a) + + virtual_machine.delete(self.apiClient, True) + + list_volumes_response = list_volumes( + self.apiClient, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + data_volume = list_volumes_response[0] + + data_volume_2 = Volume(data_volume.__dict__) + + data_volume_2.delete(self.apiClient) + + self._get_dt_volume_for_cs_volume(data_volume, should_exist=False) + + def test_04_create_non_native_snapshot(self): + """ + * Create a VM using the managed disk offering + * Create 3 snapshots on the root drive + * Delete each of the snapshots while checking the backend + * Create 2 more snaphsots + * Delete the VM (should not delete the volume) + * Delete the snapshots (should delete the volume) + """ + old_supports_resign = self.supports_resign + self._set_supports_resign(False) + + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True + ) + + self.assertEqual( + virtual_machine.state.lower(), + "running", + TestSnapshots._vm_not_in_running_state_err_msg + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine.id, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vm_1_root_volume = list_volumes_response[0] + + primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage) + + vol_snap_1 = self._create_and_test_non_native_snapshot(vm_1_root_volume.id, primary_storage_db_id, 1, + TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + + vol_snap_2 = self._create_and_test_non_native_snapshot(vm_1_root_volume.id, primary_storage_db_id, 2, + TestSnapshots._should_be_two_snapshots_in_list_err_msg) + + vol_snap_3 = self._create_and_test_non_native_snapshot(vm_1_root_volume.id, primary_storage_db_id, 3, + TestSnapshots._should_be_three_snapshots_in_list_err_msg) + + self._delete_and_test_non_native_snapshot(vol_snap_2) + + self._delete_and_test_non_native_snapshot(vol_snap_1) + + self._delete_and_test_non_native_snapshot(vol_snap_3, False) + + vol_snap_1 = self._create_and_test_non_native_snapshot(vm_1_root_volume.id, primary_storage_db_id, 1, + TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + + vol_snap_2 = self._create_and_test_non_native_snapshot(vm_1_root_volume.id, primary_storage_db_id, 2, + TestSnapshots._should_be_two_snapshots_in_list_err_msg) + + virtual_machine.delete(self.apiClient, True) + + self._delete_and_test_non_native_snapshot(vol_snap_1) + + self._delete_and_test_non_native_snapshot(vol_snap_2) + + self._set_supports_resign(old_supports_resign) + + def test_05_create_template_from_non_native_snapshot(self): + + old_supports_resign = self.supports_resign + self._set_supports_resign(False) + + primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage) + + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True + ) + + self.assertEqual( + virtual_machine.state.lower(), + "running", + TestSnapshots._vm_not_in_running_state_err_msg + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine.id, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vm_1_root_volume = list_volumes_response[0] + + dt_volume = self._get_dt_volume_for_cs_volume(vm_1_root_volume) + + self.assertNotEqual( + dt_volume, + None, + TestSnapshots._should_be_a_valid_volume_err + ) + + vol_snap_1 = self._create_and_test_non_native_snapshot(vm_1_root_volume.id, primary_storage_db_id, 1, + TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + + vol_snap_2 = self._create_and_test_non_native_snapshot(vm_1_root_volume.id, primary_storage_db_id, 2, + TestSnapshots._should_be_two_snapshots_in_list_err_msg) + + vol_snap_3 = self._create_and_test_non_native_snapshot(vm_1_root_volume.id, primary_storage_db_id, 3, + TestSnapshots._should_be_three_snapshots_in_list_err_msg) + + services = {"displaytext": "Template-1", "name": "Template-1-name", "ostype": "CentOS 5.6 (64-bit)", + "ispublic": "true"} + + template = Template.create_from_snapshot(self.apiClient, vol_snap_2, services) + + self.cleanup.append(template) + + virtual_machine_dict = {"name": "TestVM2", "displayname": "Test VM 2"} + + virtual_machine_2 = VirtualMachine.create( + self.apiClient, + virtual_machine_dict, + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=template.id, + domainid=self.domain.id, + startvm=True + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine_2.id, + listall=True + ) + + self.assertEqual( + virtual_machine_2.state.lower(), + "running", + TestSnapshots._vm_not_in_running_state_err_msg + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vm_2_root_volume = list_volumes_response[0] + + dt_volume_2 = self._get_dt_volume_for_cs_volume(vm_2_root_volume) + + self.assertNotEqual( + dt_volume_2, + None, + TestSnapshots._should_be_a_valid_volume_err + ) + + virtual_machine.delete(self.apiClient, True) + virtual_machine_2.delete(self.apiClient, True) + + self._delete_and_test_non_native_snapshot(vol_snap_1) + + self._delete_and_test_non_native_snapshot(vol_snap_2) + + self._delete_and_test_non_native_snapshot(vol_snap_3) + + self._set_supports_resign(old_supports_resign) + + def test_06_create_volume_from_non_native_snapshot(self): + + old_supports_resign = self.supports_resign + self._set_supports_resign(False) + + primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage) + + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True + ) + + self.assertEqual( + virtual_machine.state.lower(), + "running", + TestSnapshots._vm_not_in_running_state_err_msg + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine.id, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + vm_1_root_volume = list_volumes_response[0] + + dt_volume_1 = self._get_dt_volume_for_cs_volume(vm_1_root_volume) + + self.assertNotEqual( + dt_volume_1, + None, + TestSnapshots._should_be_a_valid_volume_err + ) + + vol_snap_a = self._create_and_test_non_native_snapshot(vm_1_root_volume.id, primary_storage_db_id, 1, + TestSnapshots._should_only_be_one_snapshot_in_list_err_msg) + + services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True} + + volume_created_from_snapshot = Volume.create_from_snapshot(self.apiClient, vol_snap_a.id, services, + account=self.account.name, domainid=self.domain.id) + + dt_snapshot_volume = self._get_dt_volume_for_cs_volume(volume_created_from_snapshot) + + self.assertNotEqual( + dt_snapshot_volume, + None, + TestSnapshots._should_be_a_valid_volume_err + ) + + volume_created_from_snapshot = virtual_machine.attach_volume( + self.apiClient, + volume_created_from_snapshot + ) + + self._delete_and_test_non_native_snapshot(vol_snap_a) + + virtual_machine.delete(self.apiClient, True) + + list_volumes_response = list_volumes( + self.apiClient, + listall=True + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + data_volume = list_volumes_response[0] + + data_volume_2 = Volume(data_volume.__dict__) + + data_volume_2.delete(self.apiClient) + + self._get_dt_volume_for_cs_volume(data_volume, should_exist=False) + + self._set_supports_resign(old_supports_resign) + + def _check_list(self, in_list, expected_size_of_list, err_msg): + self.assertEqual( + isinstance(in_list, list), + True, + "'in_list' is not a list." + ) + + self.assertEqual( + len(in_list), + expected_size_of_list, + err_msg + ) + + def _check_volume(self, volume, volume_name): + self.assertTrue( + volume.name.startswith(volume_name), + "The volume name is incorrect." + ) + + self.assertEqual( + volume.diskofferingid, + self.disk_offering.id, + "The disk offering is incorrect." + ) + + self.assertEqual( + volume.zoneid, + self.zone.id, + "The zone is incorrect." + ) + + self.assertEqual( + volume.storagetype, + self.disk_offering.storagetype, + "The storage type is incorrect." + ) + + def _check_and_get_cs_volume(self, volume_id, volume_name): + + list_volumes_response = list_volumes( + self.apiClient, + id=volume_id + ) + + self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg) + + cs_volume = list_volumes_response[0] + + self._check_volume(cs_volume, volume_name) + + return cs_volume + + def _get_app_instance_name_from_cs_volume(self, cs_volume, vol_type='VOLUME'): + return 'Cloudstack-' + vol_type + '-' + cs_volume.id + + def _get_iqn(self, cs_volume): + """ + Get IQN for the CS volume from Datera + """ + app_instance_name = self._get_app_instance_name_from_cs_volume(cs_volume) + app_instance = self.dt_client.app_instances.get(app_instance_name) + return app_instance['storage_instances']['storage-1']['access']['iqn'] + + def _get_dt_volumes(self): + return self.dt_client.app_instances.get() + + def _check_and_get_dt_volume(self, dt_volumes, dt_volume_name, should_exist=True): + dt_volume = None + dt_volumes = self._get_dt_volumes() + + for volume in dt_volumes.values(): + if volume['name'] == dt_volume_name: + dt_volume = volume + break + + if should_exist: + self.assertNotEqual( + dt_volume, + None, + "Check if Datera volume was created: " + str(dt_volumes) + ) + else: + self.assertEqual( + dt_volume, + None, + "Check if volume was deleted: " + str(dt_volumes) + ) + + return dt_volume + + @classmethod + def _set_supports_resign(cls, value=True): + supports_resign = str(value) + + sql_query = "UPDATE host_details SET value='" + supports_resign + "' WHERE name='supportsResign'" + cls.dbConnection.execute(sql_query) + + sql_query = "Update cluster_details Set value = '" + supports_resign + "' Where name = 'supportsResign'" + cls.dbConnection.execute(sql_query) + + def _get_supports_resign(cls): + + sql_query = "SELECT value from cluster_details Where name='supportsResign' AND cluster_id=%d" % cls.testdata[ + TestData.clusterId] + + sql_result = cls.dbConnection.execute(sql_query) + logger.warn(sql_result) + + if len(sql_result) < 1: + return False + + return bool(distutils.util.strtobool(sql_result[0][0].lower())) + + @classmethod + def _purge_datera_template_volumes(cls): + for ai in cls.dt_client.app_instances.get().values(): + if 'TEMPLATE' in ai['name']: + ai.set(admin_state="offline") + ai.delete() + + def _get_cs_storage_pool_db_id(self, storage_pool): + return self._get_db_id("storage_pool", storage_pool) + + def _get_db_id(self, table, db_obj): + sql_query = "Select id From " + table + " Where uuid = '" + str(db_obj.id) + "'" + + sql_result = self.dbConnection.execute(sql_query) + return sql_result[0][0] + + def _get_native_snapshots_for_dt_volume(self, dt_volume): + snapshots_dict = dt_volume['storage_instances']['storage-1']['volumes']['volume-1']['snapshots'] + return snapshots_dict.values() + + def _get_dt_volume_for_cs_volume(self, cs_volume, vol_type='VOLUME', should_exist=True): + + dt_volume_name = self._get_app_instance_name_from_cs_volume(cs_volume, vol_type) + dt_volumes = self._get_dt_volumes() + + return self._check_and_get_dt_volume(dt_volumes, dt_volume_name, should_exist) + + def _create_and_test_snapshot(self, cs_vol_id, primary_storage_db_id, expected_num_snapshots, err_mesg): + + vol_snap = Snapshot.create( + self.apiClient, + volume_id=cs_vol_id + ) + + list_volumes_response = list_volumes( + self.apiClient, + id=cs_vol_id + ) + + cs_volume = list_volumes_response[0] + dt_volume = self._get_dt_volume_for_cs_volume(cs_volume) + + dt_snapshots = self._get_native_snapshots_for_dt_volume(dt_volume) + + self._check_list(dt_snapshots, expected_num_snapshots, err_mesg) + + dt_snapshot = self._most_recent_dt_snapshot(dt_snapshots) + + vol_snap_db_id = self._get_cs_volume_snapshot_db_id(vol_snap) + + snapshot_details = self._get_snapshot_details(vol_snap_db_id) + + dt_volume_id = self._get_app_instance_name_from_cs_volume(cs_volume) + + dt_snapshot_id = dt_volume_id + ':' + dt_snapshot['timestamp'] + + self._check_snapshot_details(snapshot_details, vol_snap_db_id, dt_volume_id, dt_snapshot_id, + primary_storage_db_id) + return vol_snap + + def _create_and_test_snapshot_2(self, volume_id_for_snapshot, dt_volume_id, dt_volume_id_for_volume_snapshot, + primary_storage_db_id, dt_volume_size, + dt_account_id, expected_num_volumes, volume_err_msg): + pass + + def _delete_and_test_snapshot(self, vol_snap, check_volume=True): + vol_snap_id = vol_snap.id + vol_snap_db_id = self._get_cs_volume_snapshot_db_id(vol_snap) + + snapshot_details = self._get_snapshot_details(vol_snap_db_id) + + dt_volume_id = snapshot_details.get("DateraVolumeId") + dt_snapshot_id = snapshot_details.get("DateraSnapshotId") + + vol_snap.delete(self.apiClient) + + if check_volume: + dt_volume = self._get_datera_volume(dt_volume_id) + + dt_snapshots = self._get_native_snapshots_for_dt_volume(dt_volume) + + # check datera if it actually got deleted + self._check_dt_snapshot_does_not_exist(dt_snapshots, dt_snapshot_id) + self._check_snapshot_details_do_not_exist(vol_snap_db_id) + + def _most_recent_dt_snapshot(self, dt_snapshots): + + if dt_snapshots: + return sorted(dt_snapshots, key=lambda x: int(x['timestamp'].split('.')[0]))[-1] + + return None + + def _get_cs_volume_snapshot_db_id(self, vol_snap): + return self._get_db_id("snapshots", vol_snap) + + def _check_snapshot_details(self, snapshot_details, cs_snapshot_id, dt_volume_id, dt_snapshot_id, + storage_pool_id): + + self._check_list(snapshot_details.keys(), 5, TestSnapshots._should_be_five_items_in_list_err_msg) + + self._check_snapshot_detail(snapshot_details, cs_snapshot_id, "takeSnapshot", "true") + self._check_snapshot_detail(snapshot_details, cs_snapshot_id, "DateraVolumeId", dt_volume_id) + self._check_snapshot_detail(snapshot_details, cs_snapshot_id, "DateraSnapshotId", dt_snapshot_id) + self._check_snapshot_detail(snapshot_details, cs_snapshot_id, "DateraStoragePoolId", str(storage_pool_id)) + + # non-native + def _check_snapshot_details_non_native(self, snapshot_details, cs_snapshot_id, dt_volume_id, storage_pool_id): + + self._check_list(snapshot_details.keys(), 5, TestSnapshots._should_be_five_items_in_list_err_msg) + + self._check_snapshot_detail(snapshot_details, cs_snapshot_id, "DateraStoragePoolId", str(storage_pool_id)) + self._check_snapshot_detail(snapshot_details, cs_snapshot_id, "DateraVolumeId", dt_volume_id) + + def _check_snapshot_detail(self, snapshot_details, cs_snapshot_id, snapshot_detail_key, snapshot_detail_value): + + if snapshot_detail_key in snapshot_details: + if snapshot_details[snapshot_detail_key] == snapshot_detail_value: + return + + raise Exception( + "There is a problem with the snapshot details key '" + snapshot_detail_key + "' and value '" + str( + snapshot_detail_value) + "'.") + + def _check_snapshot_detail_starts_with(self, snapshot_details, cs_snapshot_id, snapshot_detail_key, + starts_with): + + if snapshot_detail_key in snapshot_details: + if snapshot_details[snapshot_detail_key].startswith(starts_with): + return + + raise Exception( + "There is a problem with the snapshot details key '" + snapshot_detail_key + "' and 'starts with' value '" + starts_with + "'.") + + def _get_snapshot_details(self, snapshot_db_id): + + details = {} + sql_query = "SELECT name,value FROM snapshot_details where snapshot_id=" + str(snapshot_db_id) + sql_result = self.dbConnection.execute(sql_query) + + for row in sql_result: + key = row[0] + value = row[1] + details[key] = value + + return details + + def _check_dt_snapshot_does_not_exist(self, dt_snapshots, dt_snapshot_id): + timestamp = dt_snapshot_id.split(':')[-1] + if timestamp in dt_snapshots: + raise Exception("Snapshot %s still exists on Datera" % dt_snapshot_id) + + def _check_snapshot_details_do_not_exist(self, vol_snap_db_id): + sql_query = "Select count(*) From snapshot_details Where snapshot_id = " + str(vol_snap_db_id) + + # make sure you can connect to MySQL: https://teamtreehouse.com/community/ + # cant-connect-remotely-to-mysql-server-with-mysql-workbench + sql_result = self.dbConnection.execute(sql_query) + + self.assertEqual( + sql_result[0][0], + 0, + "Snapshot details should not exist for the following CloudStack volume snapshot DB ID: " + str( + vol_snap_db_id) + ) + + def _get_datera_volume(self, vol_name): + try: + app_instance = self.dt_client.app_instances.get(vol_name) + return app_instance + except dfs_sdk.exceptions.ApiNotFoundError as e: + pass + return None + + def _delete_and_test_non_native_snapshot(self, vol_snap, check_volume=True): + + vol_snap_id = vol_snap.id + vol_snap_db_id = self._get_cs_volume_snapshot_db_id(vol_snap) + + snapshot_details = self._get_snapshot_details(vol_snap_db_id) + + dt_snapshot_id = snapshot_details.get("DateraVolumeId") + + vol_snap.delete(self.apiClient) + + dt_snapshot_volume = self._get_datera_volume(dt_snapshot_id) + + self.assertEqual(dt_snapshot_volume, + None, + TestSnapshots._should_be_zero_volumes_in_list_err_msg + ) + + # check db + self._check_snapshot_details_do_not_exist(vol_snap_db_id) + + def _create_and_test_non_native_snapshot(self, cs_vol_id, primary_storage_db_id, expected_num_snapshots, err_mesg): + + vol_snap = Snapshot.create( + self.apiClient, + volume_id=cs_vol_id + ) + + dt_snapshot_volume_name = self._get_app_instance_name_from_cs_volume(vol_snap, vol_type='SNAPSHOT') + + dt_volumes = self._get_dt_volumes() + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_snapshot_volume_name) + + vol_snap_db_id = self._get_cs_volume_snapshot_db_id(vol_snap) + + snapshot_details = self._get_snapshot_details(vol_snap_db_id) + + self._check_snapshot_details_non_native(snapshot_details, vol_snap_db_id, dt_snapshot_volume_name, + primary_storage_db_id) + + return vol_snap diff --git a/test/integration/plugins/datera/TestVolumes.py b/test/integration/plugins/datera/TestVolumes.py index 68d9896b6a95..fa4cafa3421c 100644 --- a/test/integration/plugins/datera/TestVolumes.py +++ b/test/integration/plugins/datera/TestVolumes.py @@ -26,12 +26,12 @@ import collections import distutils.util -logger = logging.getLogger(__name__) -logger_handler = logging.FileHandler('/var/tmp/{}.log'.format(__name__)) -logger_formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') -logger_handler.setFormatter(logger_formatter) -logger.addHandler(logger_handler) -logger.setLevel(logging.INFO) +logger = logging.getLogger('myapp') +hdlr = logging.FileHandler('/var/tmp/syed.log') +formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') +hdlr.setFormatter(formatter) +logger.addHandler(hdlr) +logger.setLevel(logging.WARNING) # All tests inherit from cloudstackTestCase from marvin.cloudstackTestCase import cloudstackTestCase @@ -41,22 +41,18 @@ # Import Integration Libraries # base - contains all resources as entities and defines create, delete, list operations on them -from marvin.lib.base import (Account, DiskOffering, ServiceOffering, - StoragePool, User, VirtualMachine, Volume) +from marvin.lib.base import Account, DiskOffering, ServiceOffering, StoragePool, User, VirtualMachine, Volume # common - commonly used methods for all tests are listed here -from marvin.lib.common import (get_domain, get_template, get_zone, - list_clusters, list_hosts, - list_virtual_machines, - list_volumes, list_disk_offering) +from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_virtual_machines, \ + list_volumes, list_disk_offering # utils - utility classes for common cleanup, external library wrappers, etc. from marvin.lib.utils import cleanup_resources from marvin.cloudstackAPI import resizeVolume -#from dfs_sdk import DateraApi -from dfs_sdk import get_api +from dfs_sdk import DateraApi class TestData(): @@ -96,11 +92,11 @@ class TestData(): def __init__(self): self.testdata = { TestData.Datera: { - TestData.mvip: "172.19.2.214", + TestData.mvip: "192.168.22.100", TestData.login: "admin", TestData.password: "password", TestData.port: 80, - TestData.url: "https://172.19.2.214:443" + TestData.url: "https://192.168.22.100:443" }, TestData.xenServer: { TestData.username: "root", @@ -130,10 +126,10 @@ def __init__(self): TestData.primaryStorage: { "name": "Datera-%d" % random.randint(0, 100), TestData.scope: "ZONE", - "url": "MVIP=172.19.2.214;SVIP=172.28.214.9;" + + "url": "MVIP=192.168.22.100;SVIP=192.168.100.2;" + "clusterAdminUsername=admin;clusterAdminPassword=password;" + "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" + - "numReplicas=3;", + "numReplicas=1;", TestData.provider: "Datera", TestData.tags: TestData.storageTag, TestData.capacityIops: 4500000, @@ -142,14 +138,14 @@ def __init__(self): }, TestData.virtualMachine: { "name": "TestVM", - "displayname": "TestVM", + "displayname": "Test VM", "privateport": 22, "publicport": 22, "protocol": "tcp" }, TestData.virtualMachine2: { "name": "TestVM2", - "displayname": "TestVM2", + "displayname": "Test VM 2", "privateport": 22, "publicport": 22, "protocol": "tcp" @@ -178,7 +174,6 @@ def __init__(self): "miniops": "10000", "maxiops": "15000", "hypervisorsnapshotreserve": 200, - "tags": TestData.storageTag }, TestData.diskOffering: { @@ -265,7 +260,7 @@ def __init__(self): TestData.volume_2: { TestData.diskName: "test-volume-2", }, - TestData.templateName: "tiny linux kvm", # TODO + TestData.templateName: "tiny linux xenserver", # TODO TestData.zoneId: 1, TestData.clusterId: 1, TestData.domainId: 1, @@ -323,7 +318,7 @@ def setUpXenServer(cls): @classmethod def setUpKVM(cls): - logger.info("Setting up KVM") + # KVM doesn't support root disks cls.compute_offering = ServiceOffering.create( cls.apiClient, @@ -362,11 +357,10 @@ def setUpClass(cls): # Set up datera connection datera = cls.testdata[TestData.Datera] - cls.dt_client = get_api( + cls.dt_client = DateraApi( username=datera[TestData.login], password=datera[TestData.password], - hostname=datera[TestData.mvip], - version="v2" + hostname=datera[TestData.mvip] ) # Create test account @@ -416,7 +410,6 @@ def setUpClass(cls): if cls.cluster.hypervisortype.lower() == 'kvm': cls.setUpKVM() - # Create 1 data volume_1 cls.volume = Volume.create( cls.apiClient, cls.testdata[TestData.volume_1], @@ -438,7 +431,6 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - logger.info("Tearing Down Class") try: cleanup_resources(cls.apiClient, cls._cleanup) @@ -450,49 +442,25 @@ def tearDownClass(cls): logging.debug("Exception in tearDownClass(cls): %s" % e) def setUp(self): - logger.info("Setup test") self.attached = False self.cleanup = [] def tearDown(self): - logger.info("Tearing Down test") cleanup_resources(self.apiClient, self.cleanup) - @classmethod - def _set_supports_resign(cls, val): - - supports_resign = str(val).lower() - cls.supports_resign = val - - # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench - - sql_query = "Update host_details Set value = '" + supports_resign + "' Where name = 'supportsResign'" - cls.dbConnection.execute(sql_query) - - sql_query = "Update cluster_details Set value = '" + supports_resign + "' Where name = 'supportsResign'" - cls.dbConnection.execute(sql_query) - - @classmethod - def _get_supports_resign(cls): - - sql_query = "SELECT value from cluster_details Where name='supportsResign' AND cluster_id=%d" % cls.testdata[ - TestData.clusterId] + @attr(hypervisor='XenServer') + def test_00_check_template_cache(self): - sql_result = cls.dbConnection.execute(sql_query) - logger.warn(sql_result) + if not self.supports_resign: + self.skipTest("Resignature not supported, skipping") - if len(sql_result) < 1: - return False + dt_volumes = self._get_dt_volumes() - return bool(distutils.util.strtobool(sql_result[0][0].lower())) + template_volume_name = self._get_app_instance_name_from_cs_volume(self.template, vol_type='TEMPLATE') - def _get_cs_storage_pool_db_id(self, storage_pool): - return self._get_db_id("storage_pool", storage_pool) + dt_volume = self._check_and_get_dt_volume(dt_volumes, template_volume_name) - def _get_db_id(self, table, db_obj): - sql_query = "Select id From " + table + " Where uuid = '" + str(db_obj.id) + "'" - sql_result = self.dbConnection.execute(sql_query) - return sql_result[0][0] + initiator_group_name = self._get_initiator_group_name() @classmethod def _purge_datera_volumes(cls): @@ -502,10 +470,12 @@ def _purge_datera_volumes(cls): if 'CS-T' in ai['name']: ai.set(admin_state="offline") ai.delete() +======= + self._check_initiator_group(dt_volume, initiator_group_name, False) def test_01_attach_new_volume_to_stopped_VM(self): - '''Attach a volume to a stopped virtual machine, then start VM''' + # Create VM and volume for tests virtual_machine = VirtualMachine.create( self.apiClient, @@ -518,14 +488,9 @@ def test_01_attach_new_volume_to_stopped_VM(self): startvm=True, mode='advanced' ) - self.cleanup.append(virtual_machine) - - template_volume_name = \ - self._get_app_instance_name_from_cs_volume(self.template, - vol_type='TEMPLATE') - dt_volume = self._check_and_get_dt_volume(template_volume_name) + self.cleanup.append(virtual_machine) - virtual_machine.stop(self.apiClient, forced=True) + virtual_machine.stop(self.apiClient) new_volume = Volume.create( self.apiClient, @@ -567,9 +532,11 @@ def test_01_attach_new_volume_to_stopped_VM(self): iqn = self._get_iqn(newvolume) + dt_volumes = self._get_dt_volumes() + dt_new_volname = self._get_app_instance_name_from_cs_volume(newvolume) - dt_volume = self._check_and_get_dt_volume(dt_new_volname) + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_new_volname) self._check_size_and_iops(dt_volume, newvolume, dt_volume_size) @@ -578,12 +545,13 @@ def test_01_attach_new_volume_to_stopped_VM(self): self._check_initiator_group(dt_volume, initiator_group_name) self._check_hypervisor(iqn) - logger.info("Detach volume from the VM") + virtual_machine.detach_volume( self.apiClient, new_volume ) + def test_02_attach_detach_attach_volume(self): '''Attach, detach, and attach volume to a running VM''' @@ -599,7 +567,7 @@ def test_02_attach_detach_attach_volume(self): startvm=True, mode='advanced' ) - self.cleanup.append(virtual_machine) + self.cleanup.append(virtual_machine) self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) @@ -639,7 +607,9 @@ def test_02_attach_detach_attach_volume(self): dt_volume_name = self._get_app_instance_name_from_cs_volume(self.volume) - dt_volume = self._check_and_get_dt_volume(dt_volume_name) + dt_volumes = self._get_dt_volumes() + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) self._check_initiator_group(dt_volume, initiator_group_name) @@ -676,7 +646,9 @@ def test_02_attach_detach_attach_volume(self): str(vm.state) ) - dt_volume = self._check_and_get_dt_volume(dt_volume_name) + dt_volumes = self._get_dt_volumes() + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) self._check_initiator_group(dt_volume, initiator_group_name, False) @@ -712,12 +684,15 @@ def test_02_attach_detach_attach_volume(self): TestVolumes._vm_not_in_running_state_err_msg ) - dt_volume = self._check_and_get_dt_volume(dt_volume_name) + dt_volumes = self._get_dt_volumes() + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) self._check_initiator_group(dt_volume, initiator_group_name) self._check_hypervisor(iqn) + def test_03_attached_volume_reboot_VM(self): '''Attach volume to running VM, then reboot.''' # Create VM and volume for tests @@ -732,7 +707,7 @@ def test_03_attached_volume_reboot_VM(self): startvm=True, mode='advanced' ) - self.cleanup.append(virtual_machine) + self.cleanup.append(virtual_machine) self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) @@ -773,7 +748,9 @@ def test_03_attached_volume_reboot_VM(self): volume_size_gb = self._get_volume_size_with_hsr(self.volume) - dt_volume = self._check_and_get_dt_volume(dt_volume_name) + dt_volumes = self._get_dt_volumes() + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) self._check_size_and_iops(dt_volume, vol, volume_size_gb) @@ -796,7 +773,9 @@ def test_03_attached_volume_reboot_VM(self): dt_volume_size = self._get_volume_size_with_hsr(self.volume) - dt_volume = self._check_and_get_dt_volume(dt_volume_name) + dt_volumes = self._get_dt_volumes() + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) self._check_size_and_iops(dt_volume, vol, dt_volume_size) @@ -804,190 +783,1140 @@ def test_03_attached_volume_reboot_VM(self): self._check_hypervisor(iqn) - def _check_if_device_visible_in_vm(self, vm, dev_name): - try: - ssh_client = vm.get_ssh_client() - except Exception as e: - self.fail("SSH failed for virtual machine: %s - %s" % - (vm.ipaddress, e)) + def test_04_detach_volume_reboot(self): + '''Detach volume from a running VM, then reboot.''' - cmd = "iostat | grep %s" % dev_name - res = ssh_client.execute(cmd) - logger.warn(cmd) - logger.warn(res) + # Create VM and volume for tests + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True, + mode='advanced' + ) + self.cleanup.append(virtual_machine) - if not res: - self.fail("Device %s not found on VM: %s" % (dev_name, vm.ipaddress)) - def _check_list(self, in_list, expected_size_of_list, err_msg): - self.assertEqual( - isinstance(in_list, list), - True, - "'in_list' is not a list." - ) - self.assertEqual( - len(in_list), - expected_size_of_list, - err_msg - ) + self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) - def _check_initiator_group(self, dt_volume, initiator_group_name, should_exist=True): + ####################################### + ####################################### + # STEP 1: Attach volume to running VM # + ####################################### + ####################################### - volume_initiator_groups = dt_volume['storage_instances']['storage-1']['acl_policy']['initiator_groups'] + self.volume = virtual_machine.attach_volume( + self.apiClient, + self.volume + ) - if should_exist: - self.assertTrue( - initiator_group_name in volume_initiator_groups[0], - "Initiator group not assigned to volume" - ) + self.attached = True - else: + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) - self.assertTrue( - len(volume_initiator_groups) == 0, - "Initiator group still asigined to volume, should have been removed" - ) + dt_volume_name = self._get_app_instance_name_from_cs_volume(vol) - def _check_volume(self, volume, volume_name, disk_offering): - self.assertTrue( - volume.name.startswith(volume_name), - "The volume name is incorrect." - ) + vm = self._get_vm(virtual_machine.id) self.assertEqual( - volume.diskofferingid, - disk_offering.id, - "The disk offering is incorrect." + vol.virtualmachineid, + vm.id, + TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg ) self.assertEqual( - volume.zoneid, - self.zone.id, - "The zone is incorrect." + vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg ) - self.assertEqual( - volume.storagetype, - self.disk_offering.storagetype, - "The storage type is incorrect." - ) + iqn = self._get_iqn(self.volume) - def _check_size_and_iops(self, dt_volume, cs_volume, size): + dt_volume_size = self._get_volume_size_with_hsr(self.volume) - dt_max_total_iops = dt_volume['storage_instances']['storage-1']['volumes']['volume-1']['performance_policy'][ - 'total_iops_max'] - self.assertEqual( - dt_max_total_iops, - cs_volume.maxiops, - "Check QOS - Max IOPS: " + str(dt_max_total_iops) - ) + dt_volumes = self._get_dt_volumes() - dt_volume_size = dt_volume['storage_instances']['storage-1']['volumes']['volume-1']['size'] - self.assertEqual( - dt_volume_size, - size, - "Check volume size: " + str(dt_volume_size) - ) + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) - def _check_and_get_cs_volume(self, volume_id, volume_name, disk_offering=None): + self._check_size_and_iops(dt_volume, vol, dt_volume_size) - if not disk_offering: - disk_offering = self.disk_offering + self._check_hypervisor(iqn) - list_volumes_response = list_volumes( + ######################################### + ######################################### + # STEP 2: Detach volume from running VM # + ######################################### + ######################################### + + self.volume = virtual_machine.detach_volume( self.apiClient, - id=volume_id + self.volume ) - self._check_list(list_volumes_response, 1, TestVolumes._should_only_be_one_volume_in_list_err_msg) + self.attached = False - cs_volume = list_volumes_response[0] + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) - self._check_volume(cs_volume, volume_name, disk_offering) + vm = self._get_vm(virtual_machine.id) - return cs_volume + self.assertEqual( + vol.virtualmachineid, + None, + "The volume should not be attached to a VM." + ) - def _get_app_instance_name_from_cs_volume(self, cs_volume, vol_type='VOLUME'): - """ - Get Datera app_instance name based on ACS data object types - Eg. CS-V-test-volume-7XWJ5Q-dfc41254-371a-40b3-b410-129eb79893c0 - """ - app_inst_prefix = 'CS' + self.assertEqual( + vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg + ) - if vol_type == 'VOLUME': - vol_type_char = 'V' - uuid = cs_volume.id - name = cs_volume.name - app_instance_name = app_inst_prefix + '-' + vol_type_char + '-' + name + '-' + uuid + dt_volumes = self._get_dt_volumes() - if vol_type == 'TEMPLATE': - vol_type_char = 'T' - uuid = cs_volume.id - primary_storage_db_id = str(self._get_cs_storage_pool_db_id(self.primary_storage)) - app_instance_name = app_inst_prefix + '-' + vol_type_char + '-' + uuid + '-' + primary_storage_db_id + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) - return app_instance_name + initiator_group_name = self._get_initiator_group_name() - def _get_iqn(self, cs_volume): - """ - Get IQN for the CS volume from Datera - """ - app_instance_name = self._get_app_instance_name_from_cs_volume(cs_volume) - app_instance = self.dt_client.app_instances.get(app_instance_name) - return app_instance['storage_instances']['storage-1']['access']['iqn'] + self._check_initiator_group(dt_volume, initiator_group_name, False) - def _get_cs_volume_size_with_hsr(self, cs_volume): + self._check_hypervisor(iqn, False) - disk_size_bytes = cs_volume.size + ####################################### + ####################################### + # STEP 3: Reboot VM with detached vol # + ####################################### + ####################################### - disk_offering_id = cs_volume.diskofferingid + virtual_machine.reboot(self.apiClient) - disk_offering = list_disk_offering(self.apiClient, id=disk_offering_id)[0] + dt_volumes = self._get_dt_volumes() - hsr = disk_offering.hypervisorsnapshotreserve + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) - disk_size_with_hsr_bytes = disk_size_bytes + (disk_size_bytes * hsr) / 100 + self._check_initiator_group(dt_volume, initiator_group_name, False) - disk_size_with_hsr_gb = int(math.ceil(disk_size_with_hsr_bytes / (1024 ** 3))) + self._check_hypervisor(iqn, False) - return disk_size_with_hsr_gb - def _get_volume_size_with_hsr(self, cs_volume): + def test_05_detach_vol_stopped_VM_start(self): + '''Detach volume from a stopped VM, then start.''' + # Create VM and volume for tests + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True, + mode='advanced' + ) + self.cleanup.append(virtual_machine) - app_instance_name = self._get_app_instance_name_from_cs_volume(cs_volume) - app_instance = self.dt_client.app_instances.get(app_instance_name) - volume_size_gb = app_instance['storage_instances']['storage-1']['volumes']['volume-1']['size'] + self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) - self.assertEqual( - isinstance(volume_size_gb, int), - True, - "The volume size should be a non-zero integer." + ####################################### + ####################################### + # STEP 1: Attach volume to running VM # + ####################################### + ####################################### + + self.volume = virtual_machine.attach_volume( + self.apiClient, + self.volume ) - return volume_size_gb + self.attached = True - def _get_initiator_group_name(self): - init_group_prefix = 'CS-InitiatorGroup' - initiator_group_name = init_group_prefix + '-' + self.cluster.id - self.dt_client.initiator_groups.get(initiator_group_name) - return initiator_group_name + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) - def _get_dt_volumes(self): - return self.dt_client.app_instances.get() + vm = self._get_vm(virtual_machine.id) - def _get_vm(self, vm_id): + self.assertEqual( + vol.virtualmachineid, + vm.id, + TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg + ) + + iqn = self._get_iqn(self.volume) + + dt_volume_size = self._get_volume_size_with_hsr(self.volume) + + dt_volumes = self._get_dt_volumes() + + dt_volume_name = self._get_app_instance_name_from_cs_volume(self.volume) + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) + + self._check_size_and_iops(dt_volume, vol, dt_volume_size) + + self._check_hypervisor(iqn) + + ######################################### + ######################################### + # STEP 2: Detach volume from stopped VM # + ######################################### + ######################################### + + virtual_machine.stop(self.apiClient) + + self.volume = virtual_machine.detach_volume( + self.apiClient, + self.volume + ) + + self.attached = False + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + None, + "The volume should not be attached to a VM." + ) + + self.assertEqual( + vm.state.lower(), + 'stopped', + TestVolumes._vm_not_in_stopped_state_err_msg + ) + + dt_volumes = self._get_dt_volumes() + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) + + initiator_group_name = self._get_initiator_group_name() + + self._check_initiator_group(dt_volume, initiator_group_name, False) + + self._check_hypervisor(iqn, False) + + ####################################### + ####################################### + # STEP 3: Start VM with detached vol # + ####################################### + ####################################### + + virtual_machine.start(self.apiClient) + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(virtual_machine.id) + + dt_volumes = self._get_dt_volumes() + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) + + self._check_initiator_group(dt_volume, initiator_group_name, False) + + self._check_hypervisor(iqn, False) + + + def test_06_attach_volume_to_stopped_VM(self): + '''Attach a volume to a stopped virtual machine, then start VM''' + + # Create VM and volume for tests + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True, + mode='advanced' + ) + self.cleanup.append(virtual_machine) + + virtual_machine.stop(self.apiClient) + + self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + ####################################### + ####################################### + # STEP 1: Attach volume to stopped VM # + ####################################### + ####################################### + + self.volume = virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + vm.id, + TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'stopped', + TestVolumes._vm_not_in_stopped_state_err_msg + ) + + dt_volume_size = self._get_volume_size_with_hsr(self.volume) + + dt_volumes = self._get_dt_volumes() + + dt_volume_name = self._get_app_instance_name_from_cs_volume(self.volume) + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) + + self._check_size_and_iops(dt_volume, vol, dt_volume_size) + + virtual_machine.start(self.apiClient) + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + vm.id, + TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg + ) + + iqn = self._get_iqn(self.volume) + + dt_volume_size = self._get_volume_size_with_hsr(self.volume) + + dt_volumes = self._get_dt_volumes() + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) + + self._check_size_and_iops(dt_volume, vol, dt_volume_size) + + initiator_group_name = self._get_initiator_group_name() + + self._check_initiator_group(dt_volume, initiator_group_name) + + self._check_hypervisor(iqn) + + + def test_07_destroy_expunge_VM_with_volume(self): + '''Destroy and expunge VM with attached volume''' + + ####################################### + ####################################### + # STEP 1: Create VM and attach volume # + ####################################### + ####################################### + + test_virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine2], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True, + mode='advanced' + ) + + self.volume = test_virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(test_virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + vm.id, + TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg + ) + + dt_volume_size = self._get_volume_size_with_hsr(self.volume) + + iqn = self._get_iqn(self.volume) + + dt_volume_name = self._get_app_instance_name_from_cs_volume(self.volume) + + dt_volumes = self._get_dt_volumes() + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) + + self._check_size_and_iops(dt_volume, vol, dt_volume_size) + + self._check_hypervisor(iqn) + + ####################################### + ####################################### + # STEP 2: Destroy and Expunge VM # + ####################################### + ####################################### + + test_virtual_machine.delete(self.apiClient, True) + + self.attached = False + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + self.assertEqual( + vol.virtualmachineid, + None, + "Check if attached to virtual machine" + ) + + self.assertEqual( + vol.vmname, + None, + "Check if VM was expunged" + ) + + list_virtual_machine_response = list_virtual_machines( + self.apiClient, + id=test_virtual_machine.id + ) + + self.assertEqual( + list_virtual_machine_response, + None, + "Check if VM was actually expunged" + ) + + dt_volumes = self._get_dt_volumes() + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) + + self._check_size_and_iops(dt_volume, vol, dt_volume_size) + + initiator_group_name = self._get_initiator_group_name() + + self._check_initiator_group(dt_volume, initiator_group_name, False) + + self._check_hypervisor(iqn, False) + + def test_08_delete_volume_was_attached(self): + '''Delete volume that was attached to a VM and is detached now''' + # Create VM and volume for tests + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True, + mode='advanced' + ) + self.cleanup.append(virtual_machine) + + ####################################### + ####################################### + # STEP 1: Create vol and attach to VM # + ####################################### + ####################################### + + new_volume = Volume.create( + self.apiClient, + self.testdata[TestData.volume_2], + account=self.account.name, + domainid=self.domain.id, + zoneid=self.zone.id, + diskofferingid=self.disk_offering.id + ) + + volume_to_delete_later = new_volume + + self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) + + new_volume = virtual_machine.attach_volume( + self.apiClient, + new_volume + ) + + vol = self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) + + vm = self._get_vm(virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + vm.id, + "Check if attached to virtual machine" + ) + + self.assertEqual( + vm.state.lower(), + 'running', + str(vm.state) + ) + + dt_volume_size = self._get_volume_size_with_hsr(new_volume) + + iqn = self._get_iqn(new_volume) + + dt_volumes = self._get_dt_volumes() + + dt_volume_name = self._get_app_instance_name_from_cs_volume(vol) + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) + + self._check_size_and_iops(dt_volume, vol, dt_volume_size) + + self._check_hypervisor(iqn) + + ####################################### + ####################################### + # STEP 2: Detach and delete volume # + ####################################### + ####################################### + + new_volume = virtual_machine.detach_volume( + self.apiClient, + new_volume + ) + + vol = self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) + + vm = self._get_vm(virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + None, + "Check if attached to virtual machine" + ) + + self.assertEqual( + vm.state.lower(), + 'running', + str(vm.state) + ) + + dt_volumes = self._get_dt_volumes() + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) + + self._check_size_and_iops(dt_volume, vol, dt_volume_size) + + initiator_group_name = self._get_initiator_group_name() + + self._check_initiator_group(dt_volume, initiator_group_name, False) + + self._check_hypervisor(iqn, False) + + volume_to_delete_later.delete(self.apiClient) + + list_volumes_response = list_volumes( + self.apiClient, + id=new_volume.id + ) + + self.assertEqual( + list_volumes_response, + None, + "Check volume was deleted" + ) + + dt_volumes = self._get_dt_volumes() + + self._check_and_get_dt_volume(dt_volumes, dt_volume_name, False) + + + def test_09_attach_more_than_one_disk_to_VM(self): + '''Attach more than one disk to a VM''' + + # Create VM and volume for tests + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True, + mode='advanced' + ) + self.cleanup.append(virtual_machine) + + + + volume_2 = Volume.create( + self.apiClient, + self.testdata[TestData.volume_2], + zoneid=self.zone.id, + account=self.account.name, + domainid=self.domain.id, + diskofferingid=self.disk_offering.id + ) + + self.cleanup.append(volume_2) + + self._check_and_get_cs_volume(volume_2.id, self.testdata[TestData.volume_2][TestData.diskName]) + + ####################################### + ####################################### + # Step 1: Attach volumes to VM # + ####################################### + ####################################### + + virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + virtual_machine.attach_volume( + self.apiClient, + volume_2 + ) + + vol_2 = self._check_and_get_cs_volume(volume_2.id, self.testdata[TestData.volume_2][TestData.diskName]) + + dt_volume_size = self._get_volume_size_with_hsr(self.volume) + + dt_volume_2_size = self._get_volume_size_with_hsr(volume_2) + + dt_volumes = self._get_dt_volumes() + + dt_volume_name = self._get_app_instance_name_from_cs_volume(vol) + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) + + self._check_size_and_iops(dt_volume, vol, dt_volume_size) + + iqn = self._get_iqn(self.volume) + + self._check_hypervisor(iqn) + + initiator_group_name = self._get_initiator_group_name() + + self._check_initiator_group(dt_volume, initiator_group_name) + + dt_volume_2 = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) + + dt_volume_name = self._get_app_instance_name_from_cs_volume(vol_2) + + self._check_size_and_iops(dt_volume_2, vol_2, dt_volume_2_size) + + iqn2 = self._get_iqn(volume_2) + + self._check_hypervisor(iqn2) + + self._check_initiator_group(dt_volume_2, initiator_group_name) + + virtual_machine.detach_volume(self.apiClient, volume_2) + + + def test_10_live_migrate_vm_with_volumes(self): + ''' + Live migrate a VM while it has volumes attached within a cluster + ''' + ####################################### + ####################################### + # STEP 1: Attach volume to running VM # + ####################################### + ####################################### + + # Create VM and volume for tests + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True, + mode='advanced' + ) + self.cleanup.append(virtual_machine) + + + initiator_group_name = self._get_initiator_group_name() + + virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vm = self._get_vm(virtual_machine.id) + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + self.assertEqual( + vol.virtualmachineid, + vm.id, + TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg + ) + + iqn = self._get_iqn(self.volume) + + dt_volume_name = self._get_app_instance_name_from_cs_volume(vol) + + dt_volumes = self._get_dt_volumes() + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) + + self._check_initiator_group(dt_volume, initiator_group_name) + + cs_volume_size = self._get_cs_volume_size_with_hsr(vol) + + self._check_size_and_iops(dt_volume, vol, cs_volume_size) + + self._check_hypervisor(iqn) + + self._check_if_device_visible_in_vm(virtual_machine, self.device_name) + + ######################################### + ######################################### + # STEP 2: Migrate the VM to other host # + ######################################### + ######################################### + + hosts = list_hosts(self.apiClient, clusterid=self.testdata[TestData.clusterId]) + + if len(hosts) < 2: + self.skipTest("At least two hosts should be present in the zone for migration") + + current_host_id = virtual_machine.hostid + other_host = None + for host in hosts: + if host.id != current_host_id: + other_host = host + break + + self.assertNotEqual(other_host, None, "Destination host not found") + + # Start dd on the volume + self._start_device_io(virtual_machine, self.device_name) + time.sleep(5) + bytes_written_1 = self._get_bytes_written(virtual_machine, self.device_name) + + virtual_machine.migrate(self.apiClient, other_host.id) + + list_vm_response = VirtualMachine.list(self.apiClient, id=virtual_machine.id) + + self.assertNotEqual( + list_vm_response, + None, + "Check virtual machine is listed" + ) + + vm_response = list_vm_response[0] + + self.assertEqual(vm_response.id, virtual_machine.id, "Check virtual machine ID of migrated VM") + + self.assertEqual(vm_response.hostid, other_host.id, "Check destination hostID of migrated VM") + + self._stop_device_io(virtual_machine, self.device_name) + time.sleep(5) + bytes_written_2 = self._get_bytes_written(virtual_machine, self.device_name) + + self.assertGreater(bytes_written_2, bytes_written_1, "Unable to write to device") + + ######################################### + ######################################### + # STEP 3: Detach volume from running VM # + ######################################### + ######################################### + + self.volume = virtual_machine.detach_volume( + self.apiClient, + self.volume + ) + + self.attached = False + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + None, + "The volume should not be attached to a VM." + ) + + self.assertEqual( + vm.state.lower(), + 'running', + str(vm.state) + ) + + dt_volumes = self._get_dt_volumes() + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) + + self._check_initiator_group(dt_volume, initiator_group_name, False) + + self._check_hypervisor(iqn, False) + + + def test_11_detach_resize_volume_attach(self): + ''' + Detach and resize a volume and the attach it again + ''' + + ####################################### + ####################################### + # STEP 1: Attach volume to running VM # + ####################################### + ####################################### + + # Create VM and volume for tests + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True, + mode='advanced' + ) + self.cleanup.append(virtual_machine) + + initiator_group_name = self._get_initiator_group_name() + + virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vm = self._get_vm(virtual_machine.id) + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + self.assertEqual( + vol.virtualmachineid, + vm.id, + TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg + ) + + iqn = self._get_iqn(self.volume) + + dt_volume_name = self._get_app_instance_name_from_cs_volume(vol) + + dt_volumes = self._get_dt_volumes() + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) + + self._check_initiator_group(dt_volume, initiator_group_name) + + cs_volume_size = self._get_cs_volume_size_with_hsr(vol) + + self._check_size_and_iops(dt_volume, vol, cs_volume_size) + + self._check_hypervisor(iqn) + + ######################################### + ######################################### + # STEP 2: Detach volume from running VM # + ######################################### + ######################################### + + self.volume = virtual_machine.detach_volume( + self.apiClient, + self.volume + ) + + self.attached = False + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName]) + + vm = self._get_vm(virtual_machine.id) + + self.assertEqual( + vol.virtualmachineid, + None, + "The volume should not be attached to a VM." + ) + + self.assertEqual( + vm.state.lower(), + 'running', + str(vm.state) + ) + + dt_volumes = self._get_dt_volumes() + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) + + self._check_initiator_group(dt_volume, initiator_group_name, False) + + self._check_hypervisor(iqn, False) + + ######################################### + ######################################### + # STEP 3: Resize the volume # + ######################################### + ######################################### + + self._resize_volume(self.volume, self.disk_offering_new) + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], + self.disk_offering_new) + + cs_volume_size = self._get_cs_volume_size_with_hsr(vol) + + dt_volume_name = self._get_app_instance_name_from_cs_volume(vol) + + dt_volumes = self._get_dt_volumes() + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) + + self._check_size_and_iops(dt_volume, vol, cs_volume_size) + + ######################################### + ######################################### + # STEP 4: Attach the volume # + ######################################### + ######################################### + + virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vm = self._get_vm(virtual_machine.id) + + vol = self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName], + self.disk_offering_new) + + self.assertEqual( + vol.virtualmachineid, + vm.id, + TestVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestVolumes._vm_not_in_running_state_err_msg + ) + + dt_volumes = self._get_dt_volumes() + + dt_volume = self._check_and_get_dt_volume(dt_volumes, dt_volume_name) + + iqn = self._get_iqn(self.volume) + + self._check_initiator_group(dt_volume, initiator_group_name) + + self._check_hypervisor(iqn) + + + def _check_list(self, in_list, expected_size_of_list, err_msg): + self.assertEqual( + isinstance(in_list, list), + True, + "'in_list' is not a list." + ) + + self.assertEqual( + len(in_list), + expected_size_of_list, + err_msg + ) + + def _check_initiator_group(self, dt_volume, initiator_group_name, should_exist=True): + + volume_initiator_groups = dt_volume['storage_instances']['storage-1']['acl_policy']['initiator_groups'] + + if should_exist: + self.assertTrue( + initiator_group_name in volume_initiator_groups[0], + "Initiator group not assigned to volume" + ) + + else: + + self.assertTrue( + len(volume_initiator_groups) == 0, + "Initiator group still asigined to volume, should have been removed" + ) + + def _check_volume(self, volume, volume_name, disk_offering): + self.assertTrue( + volume.name.startswith(volume_name), + "The volume name is incorrect." + ) + + self.assertEqual( + volume.diskofferingid, + disk_offering.id, + "The disk offering is incorrect." + ) + + self.assertEqual( + volume.zoneid, + self.zone.id, + "The zone is incorrect." + ) + + self.assertEqual( + volume.storagetype, + self.disk_offering.storagetype, + "The storage type is incorrect." + ) + + def _check_size_and_iops(self, dt_volume, cs_volume, size): + + dt_max_total_iops = dt_volume['storage_instances']['storage-1']['volumes']['volume-1']['performance_policy'][ + 'total_iops_max'] + self.assertEqual( + dt_max_total_iops, + cs_volume.maxiops, + "Check QOS - Max IOPS: " + str(dt_max_total_iops) + ) + + dt_volume_size = dt_volume['storage_instances']['storage-1']['volumes']['volume-1']['size'] + self.assertEqual( + dt_volume_size, + size, + "Check volume size: " + str(dt_volume_size) + ) + + def _check_and_get_cs_volume(self, volume_id, volume_name, disk_offering=None): + + if not disk_offering: + disk_offering = self.disk_offering + + list_volumes_response = list_volumes( + self.apiClient, + id=volume_id + ) + + self._check_list(list_volumes_response, 1, TestVolumes._should_only_be_one_volume_in_list_err_msg) + + cs_volume = list_volumes_response[0] + + self._check_volume(cs_volume, volume_name, disk_offering) + + return cs_volume + + def _get_app_instance_name_from_cs_volume(self, cs_volume, vol_type='VOLUME'): + app_instance_name = 'Cloudstack-' + vol_type + '-' + cs_volume.id + + if vol_type == 'TEMPLATE': + primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage) + app_instance_name += '-' + str(primary_storage_db_id) + + return app_instance_name + + def _get_iqn(self, cs_volume): + """ + Get IQN for the CS volume from Datera + """ + app_instance_name = self._get_app_instance_name_from_cs_volume(cs_volume) + app_instance = self.dt_client.app_instances.get(app_instance_name) + return app_instance['storage_instances']['storage-1']['access']['iqn'] + + def _get_cs_volume_size_with_hsr(self, cs_volume): + + disk_size_bytes = cs_volume.size + + disk_offering_id = cs_volume.diskofferingid + + disk_offering = list_disk_offering(self.apiClient, id=disk_offering_id)[0] + + hsr = disk_offering.hypervisorsnapshotreserve + + disk_size_with_hsr_bytes = disk_size_bytes + (disk_size_bytes * hsr) / 100 + + disk_size_with_hsr_gb = int(math.ceil(disk_size_with_hsr_bytes / (1024 ** 3))) + + return disk_size_with_hsr_gb + + def _get_volume_size_with_hsr(self, cs_volume): + + app_instance_name = self._get_app_instance_name_from_cs_volume(cs_volume) + app_instance = self.dt_client.app_instances.get(app_instance_name) + + volume_size_gb = app_instance['storage_instances']['storage-1']['volumes']['volume-1']['size'] + + self.assertEqual( + isinstance(volume_size_gb, int), + True, + "The volume size should be a non-zero integer." + ) + + return volume_size_gb + + def _get_initiator_group_name(self): + + initiator_group_name = 'Cloudstack-InitiatorGroup-' + self.cluster.id + self.dt_client.initiator_groups.get(initiator_group_name) + return initiator_group_name + + def _get_dt_volumes(self): + return self.dt_client.app_instances.get() + + def _get_vm(self, vm_id): list_vms_response = list_virtual_machines(self.apiClient, id=vm_id) self._check_list(list_vms_response, 1, TestVolumes._should_only_be_one_vm_in_list_err_msg) return list_vms_response[0] - def _check_and_get_dt_volume(self, dt_volume_name, should_exist=True): + def _check_and_get_dt_volume(self, dt_volumes, dt_volume_name, should_exist=True): dt_volume = None - dt_volumes = self._get_dt_volumes() for volume in list(dt_volumes.values()): if volume['name'] == dt_volume_name: @@ -1066,6 +1995,67 @@ def _check_xen_sr(self, iqn, should_exist=True): self._check_list(xen_sr, 0, TestVolumes._list_should_be_empty) + @classmethod + def _set_supports_resign(cls, val): + + supports_resign = str(val).lower() + cls.supports_resign = val + + # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench + + sql_query = "Update host_details Set value = '" + supports_resign + "' Where name = 'supportsResign'" + cls.dbConnection.execute(sql_query) + + sql_query = "Update cluster_details Set value = '" + supports_resign + "' Where name = 'supportsResign'" + cls.dbConnection.execute(sql_query) + + @classmethod + def _get_supports_resign(cls): + + sql_query = "SELECT value from cluster_details Where name='supportsResign' AND cluster_id=%d" % cls.testdata[ + TestData.clusterId] + + sql_result = cls.dbConnection.execute(sql_query) + logger.warn(sql_result) + + if len(sql_result) < 1: + return False + + return bool(distutils.util.strtobool(sql_result[0][0].lower())) + + def _get_cs_storage_pool_db_id(self, storage_pool): + return self._get_db_id("storage_pool", storage_pool) + + def _get_db_id(self, table, db_obj): + sql_query = "Select id From " + table + " Where uuid = '" + str(db_obj.id) + "'" + sql_result = self.dbConnection.execute(sql_query) + return sql_result[0][0] + + @classmethod + def _purge_datera_volumes(cls): + logger.warn("Deleting all volumes") + for ai in cls.dt_client.app_instances.get().values(): + logger.warn(ai) + if 'TEMPLATE' in ai['name']: + ai.set(admin_state="offline") + ai.delete() + + def _check_if_device_visible_in_vm(self, vm, dev_name): + + try: + ssh_client = vm.get_ssh_client() + except Exception as e: + self.fail("SSH failed for virtual machine: %s - %s" % + (vm.ipaddress, e)) + + cmd = "iostat | grep %s" % dev_name + res = ssh_client.execute(cmd) + logger.warn(cmd) + logger.warn(res) + + if not res: + self.fail("Device %s not found on VM: %s" % (dev_name, vm.ipaddress)) + def _check_if_device_removed_in_vm(self, vm, dev_name): try: diff --git a/test/integration/smoke/test_vpc_vpn.py b/test/integration/smoke/test_vpc_vpn.py index f69f84aacc32..31141095c5d6 100644 --- a/test/integration/smoke/test_vpc_vpn.py +++ b/test/integration/smoke/test_vpc_vpn.py @@ -47,6 +47,7 @@ from nose.plugins.attrib import attr import logging +import subprocess import time diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index 7d020a515a5b..c09c780944a2 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -201,8 +201,9 @@ 'UnmanagedInstance': 'Virtual Machine', 'Rolling': 'Rolling Maintenance', 'importVsphereStoragePolicies' : 'vSphere storage policies', - 'listVsphereStoragePolicies' : 'vSphere storage policies' -} + 'listVsphereStoragePolicies' : 'vSphere storage policies', + 'Maas': 'MaaS', + } categories = {} diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py index b01335a150ff..c5dd233f3343 100644 --- a/tools/marvin/marvin/config/test_data.py +++ b/tools/marvin/marvin/config/test_data.py @@ -1444,6 +1444,126 @@ }, "service_offerings": { + "GRID V100D-32A": + { + "name": "vGPU V100D-32A", + "displaytext": "vGPU V100D-32A", + "cpunumber": 2, + "cpuspeed": 1600, \# in MHz + "memory": 3072, \# In MBs + }, + "GRID V100D-8Q": + { + "name": "vGPU V100D-8Q", + "displaytext": "vGPU V100D-8Q", + "cpunumber": 2, + "cpuspeed": 1600, \# in MHz + "memory": 3072, \# In MBs + }, + "GRID V100D-4A": + { + "name": "vGPU V100D-4A", + "displaytext": "vGPU V100D-4A", + "cpunumber": 2, + "cpuspeed": 1600, \# in MHz + "memory": 3072, \# In MBs + }, + "GRID V100D-1B": + { + "name": "vGPU V100D-1B", + "displaytext": "vGPU V100D-1B", + "cpunumber": 2, + "cpuspeed": 1600, \# in MHz + "memory": 3072, \# In MBs + }, + "GRID V100D-2Q": + { + "name": "vGPU V100D-2Q", + "displaytext": "vGPU V100D-2Q", + "cpunumber": 2, + "cpuspeed": 1600, \# in MHz + "memory": 3072, \# In MBs + }, + "GRID V100D-4Q": + { + "name": "vGPU V100D-4Q", + "displaytext": "vGPU V100D-4Q", + "cpunumber": 2, + "cpuspeed": 1600, \# in MHz + "memory": 3072, \# In MBs + }, + "GRID V100D-2A": + { + "name": "vGPU V100D-2A", + "displaytext": "vGPU V100D-2A", + "cpunumber": 2, + "cpuspeed": 1600, \# in MHz + "memory": 3072, \# In MBs + }, + "GRID V100D-2B": + { + "name": "vGPU V100D-2B", + "displaytext": "vGPU V100D-2B", + "cpunumber": 2, + "cpuspeed": 1600, \# in MHz + "memory": 3072, \# In MBs + }, + "GRID V100D-32Q": + { + "name": "vGPU V100D-32Q", + "displaytext": "vGPU V100D-32Q", + "cpunumber": 2, + "cpuspeed": 1600, \# in MHz + "memory": 3072, \# In MBs + }, + "GRID V100D-16A": + { + "name": "vGPU V100D-16A", + "displaytext": "vGPU V100D-16A", + "cpunumber": 2, + "cpuspeed": 1600, \# in MHz + "memory": 3072, \# In MBs + }, + "GRID V100D-1Q": + { + "name": "vGPU V100D-1Q", + "displaytext": "vGPU V100D-1Q", + "cpunumber": 2, + "cpuspeed": 1600, \# in MHz + "memory": 3072, \# In MBs + }, + "GRID V100D-2B4": + { + "name": "vGPU V100D-2B4", + "displaytext": "vGPU V100D-2B4", + "cpunumber": 2, + "cpuspeed": 1600, \# in MHz + "memory": 3072, \# In MBs + }, + "GRID V100D-16Q": + { + "name": "vGPU V100D-16Q", + "displaytext": "vGPU V100D-16Q", + "cpunumber": 2, + "cpuspeed": 1600, \# in MHz + "memory": 3072, \# In MBs + }, + "GRID V100D-8A": + { + "name": "vGPU V100D-8A", + "displaytext": "vGPU V100D-8A", + "cpunumber": 2, + "cpuspeed": 1600, \# in MHz + "memory": 3072, \# In MBs + }, + "GRID V100D-1A": + { + "name": "vGPU V100D-1A", + "displaytext": "vGPU V100D-1A", + "cpunumber": 2, + "cpuspeed": 1600, \# in MHz + "memory": 3072, \# In MBs + }, "GRID K260Q": { "name": "vGPU260Q", diff --git a/utils/src/main/java/com/cloud/utils/SwiftUtil.java b/utils/src/main/java/com/cloud/utils/SwiftUtil.java index 7c2914d262de..69b860213df3 100644 --- a/utils/src/main/java/com/cloud/utils/SwiftUtil.java +++ b/utils/src/main/java/com/cloud/utils/SwiftUtil.java @@ -309,4 +309,4 @@ private static String getMeta(Map metas) { } return cms.toString(); } -} \ No newline at end of file +}