From 95c93e2b54ff0dcb5d7a4dd0f7f78e21c789f511 Mon Sep 17 00:00:00 2001 From: Ravi Chunduru Date: Tue, 16 Jul 2013 04:18:47 -0700 Subject: [PATCH 0001/4438] Adds support for Openstack Networking FWaaS (Firewall) blueprint quantum-fwaas-devstack Change-Id: I3c546433415ab18a5933a25774a06df7c4cb42e9 --- lib/horizon | 7 ++++++- lib/neutron | 26 ++++++++++++++++++++++++-- lib/neutron_plugins/services/firewall | 27 +++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 3 deletions(-) create mode 100644 lib/neutron_plugins/services/firewall diff --git a/lib/horizon b/lib/horizon index 89bd65901c..1e758bfc43 100644 --- a/lib/horizon +++ b/lib/horizon @@ -50,7 +50,7 @@ function _horizon_config_set() { if [ -n "$line" ]; then sed -i -e "/^$section/,/^}/ s/^\( *'$option'\) *:.*$/\1: $value,/" $file else - sed -i -e "/^$section/ a\n '$option': $value,\n" $file + sed -i -e "/^$section/a\ '$option': $value," $file fi else echo -e "\n\n$section = {\n '$option': $value,\n}" >> $file @@ -96,6 +96,11 @@ function init_horizon() { _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_lb True fi + # enable firewall dashboard in case service is enabled + if is_service_enabled q-fwaas; then + _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_firewall True + fi + # Initialize the horizon database (it stores sessions and notices shown to # users). The user system is external (keystone). 
cd $HORIZON_DIR diff --git a/lib/neutron b/lib/neutron index 31876dee88..be831185ca 100644 --- a/lib/neutron +++ b/lib/neutron @@ -207,6 +207,10 @@ source $TOP_DIR/lib/neutron_plugins/services/loadbalancer # Hardcoding for 1 service plugin for now source $TOP_DIR/lib/neutron_plugins/services/vpn +# Firewall Service Plugin functions +# -------------------------------- +source $TOP_DIR/lib/neutron_plugins/services/firewall + # Use security group or not if has_neutron_plugin_security_group; then Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} @@ -230,6 +234,9 @@ function configure_neutron() { if is_service_enabled q-vpn; then _configure_neutron_vpn fi + if is_service_enabled q-fwaas; then + _configure_neutron_fwaas + fi if is_service_enabled q-svc; then _configure_neutron_service fi @@ -418,11 +425,17 @@ function start_neutron_agents() { screen_it q-agt "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" screen_it q-dhcp "cd $NEUTRON_DIR && python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE" + L3_CONF_FILES="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE" + + if is_service_enabled q-fwaas; then + L3_CONF_FILES="$L3_CONF_FILES --config-file $Q_FWAAS_CONF_FILE" + fi if is_service_enabled q-vpn; then - screen_it q-vpn "cd $NEUTRON_DIR && $AGENT_VPN_BINARY --config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE" + screen_it q-vpn "cd $NEUTRON_DIR && $AGENT_VPN_BINARY $L3_CONF_FILES" else - screen_it q-l3 "cd $NEUTRON_DIR && python $AGENT_L3_BINARY --config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE" + screen_it q-l3 "cd $NEUTRON_DIR && python $AGENT_L3_BINARY $L3_CONF_FILES" fi + screen_it q-meta "cd $NEUTRON_DIR && python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE" if [ "$VIRT_DRIVER" = 'xenserver' ]; then @@ -554,6 +567,10 @@ function _configure_neutron_l3_agent() { AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"} 
Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini + if is_service_enabled q-fwaas; then + Q_FWAAS_CONF_FILE=$NEUTRON_CONF_DIR/fwaas_driver.ini + fi + cp $NEUTRON_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE iniset $Q_L3_CONF_FILE DEFAULT verbose True @@ -586,6 +603,11 @@ function _configure_neutron_lbaas() { neutron_agent_lbaas_configure_agent } +function _configure_neutron_fwaas() { + neutron_fwaas_configure_common + neutron_fwaas_configure_driver +} + function _configure_neutron_vpn() { neutron_vpn_install_agent_packages diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall new file mode 100644 index 0000000000..1597e8577d --- /dev/null +++ b/lib/neutron_plugins/services/firewall @@ -0,0 +1,27 @@ +# Neutron firewall plugin +# --------------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +FWAAS_PLUGIN=neutron.services.firewall.fwaas_plugin.FirewallPlugin + +function neutron_fwaas_configure_common() { + if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then + Q_SERVICE_PLUGIN_CLASSES=$FWAAS_PLUGIN + else + Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$FWAAS_PLUGIN" + fi +} + +function neutron_fwaas_configure_driver() { + FWAAS_DRIVER_CONF_FILENAME=/etc/neutron/fwaas_driver.ini + cp $NEUTRON_DIR/etc/fwaas_driver.ini $FWAAS_DRIVER_CONF_FILENAME + + iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas enabled True + iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas driver "neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver" +} + +# Restore xtrace +$MY_XTRACE From c02b2f87cc9f8b75f5d1eb42b31d1117266a1aec Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Tue, 30 Jul 2013 19:43:10 +0100 Subject: [PATCH 0002/4438] xenapi: Use a jeos vm as a template DevStack was using templating to speed up the setup process with XenServer. The template already included some devstack customisations, not just a clean OS. 
This change modifies devstack behaviour, so that the template is a simple clean operating system. This makes it easier to use custom OS as a template, potentially speeding up the tests. related to blueprint xenapi-devstack-cleanup Change-Id: I6cb0a7ed7a90e749b78329a8e2b65fb8b7fcfa5f --- tools/xen/install_os_domU.sh | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index e762f6d875..92b131795b 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -167,8 +167,8 @@ fi # GUEST_NAME=${GUEST_NAME:-"DevStackOSDomU"} -TNAME="devstack_template" -SNAME_PREPARED="template_prepared" +TNAME="jeos_template_for_devstack" +SNAME_TEMPLATE="jeos_snapshot_for_devstack" SNAME_FIRST_BOOT="before_first_boot" function wait_for_VM_to_halt() { @@ -234,21 +234,8 @@ if [ -z "$templateuuid" ]; then vm_uuid=$(xe_min vm-list name-label="$GUEST_NAME") xe vm-param-set actions-after-reboot=Restart uuid="$vm_uuid" - # - # Prepare VM for DevStack - # - - # Install XenServer tools, and other such things - $THIS_DIR/prepare_guest_template.sh "$GUEST_NAME" - - # start the VM to run the prepare steps - xe vm-start vm="$GUEST_NAME" - - # Wait for prep script to finish and shutdown system - wait_for_VM_to_halt - # Make template from VM - snuuid=$(xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_PREPARED") + snuuid=$(xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_TEMPLATE") xe snapshot-clone uuid=$snuuid new-name-label="$TNAME" else # @@ -257,6 +244,19 @@ else vm_uuid=$(xe vm-install template="$TNAME" new-name-label="$GUEST_NAME") fi +# +# Prepare VM for DevStack +# + +# Install XenServer tools, and other such things +$THIS_DIR/prepare_guest_template.sh "$GUEST_NAME" + +# start the VM to run the prepare steps +xe vm-start vm="$GUEST_NAME" + +# Wait for prep script to finish and shutdown system +wait_for_VM_to_halt + ## Setup network cards # Wipe out all 
destroy_all_vifs_of "$GUEST_NAME" From 05901f46f75e1c80a2448bda463e1dd6b937e7e7 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Thu, 1 Aug 2013 10:44:22 -0700 Subject: [PATCH 0003/4438] Remove unused keystone params from neutron agents' config files DHCP, L3 and Metadata agents' config files no longer need to duplicate this info; it's available in neutron.conf Change-Id: I7bea25d1c2b9249ddacce3f4638f7a8ed4f43197 --- lib/neutron | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/neutron b/lib/neutron index 835f900425..bfae486633 100644 --- a/lib/neutron +++ b/lib/neutron @@ -523,7 +523,6 @@ function _configure_neutron_debug_command() { # be cleaned. iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper "$Q_RR_COMMAND" - _neutron_setup_keystone $NEUTRON_TEST_CONFIG_FILE DEFAULT set_auth_url _neutron_setup_interface_driver $NEUTRON_TEST_CONFIG_FILE neutron_plugin_configure_debug_command @@ -540,7 +539,6 @@ function _configure_neutron_dhcp_agent() { iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - _neutron_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url _neutron_setup_interface_driver $Q_DHCP_CONF_FILE neutron_plugin_configure_dhcp_agent @@ -561,7 +559,6 @@ function _configure_neutron_l3_agent() { iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - _neutron_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url _neutron_setup_interface_driver $Q_L3_CONF_FILE neutron_plugin_configure_l3_agent @@ -578,7 +575,6 @@ function _configure_neutron_metadata_agent() { iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - _neutron_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url } function _configure_neutron_lbaas() { From 5e28a3e2d2e7f02d6f0c81ddfe4ae3b0387112b6 Mon Sep 17 00:00:00 2001 From: Kui Shi Date: Fri, 2 Aug 2013 17:26:28 +0800 
Subject: [PATCH 0004/4438] Add call trace in error message Call trace can help user to locate problem quickly. stack.sh uses bash as interpreter, which defines a series of "Shell Variables": BASH_SOURCE: An array variable whose members are the source filenames BASH_LINENO: An array variable whose members are the line numbers in source files where each corresponding member of FUNCNAME was invoked. FUNCNAME: An array variable containing the names of all shell functions currently in the execution call stack. run "man bash" and search the variable name to get detailed info. In function backtrace, it gets the call deepth from ${#BASH_SOURCE[@]}, then print the call stack from top to down. In function die, backtrace is called with parameter "2" to ignore the call trace of function "die" and "backtrace". I add a broken function in lib/database, and call it in stack.sh, the output looks like this: [Call Trace] ./stack.sh:104:broken /home/kui/osd/devstack/lib/database:24:die [ERROR] ./stack.sh:24 It is broken Fixes bug # 1207660 Change-Id: I04d0b3ccf783c769e41582c20f48694c19917334 --- functions | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/functions b/functions index 262f70f29f..d28efefe55 100644 --- a/functions +++ b/functions @@ -76,6 +76,19 @@ function cp_it { } +# Prints backtrace info +# filename:lineno:function +function backtrace { + local level=$1 + local deep=$((${#BASH_SOURCE[@]} - 1)) + echo "[Call Trace]" + while [ $level -le $deep ]; do + echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}" + deep=$((deep - 1)) + done +} + + # Prints line number and "message" then exits # die $LINENO "message" function die() { @@ -85,6 +98,7 @@ function die() { if [ $exitcode == 0 ]; then exitcode=1 fi + backtrace 2 err $line "$*" exit $exitcode } From 17df0775edaf1d45c59a41147779fd65fd986911 Mon Sep 17 00:00:00 2001 From: Kui Shi Date: Fri, 2 Aug 2013 17:55:41 +0800 Subject: [PATCH 0005/4438] misleading source filename in error message 
when ./stack.sh encounters error, the output may look like this: [ERROR] ./stack.sh:698 nova-api did not start The source filename is wrong. Actually, it should be like this: [ERROR] //lib/nova:698 nova-api did not start stack.sh uses bash as interpreter, which define "Shell Variables" BASH_SOURCE: An array variable whose members are the source filenames where the corresponding shell function names in the FUNCNAME array variable are defined. The shell function ${FUNCNAME[$i]} is defined in the file ${BASH_SOURCE[$i]} and called from ${BASH_SOURCE[$i+1]}. The function "err" is called by function "die" ( and "err_if_not_set", and "err_if_not_set" is not used at all). ${BASH_SOURCE[2]} will ignore the deepest two call entries, which corresponding to the shell functions: "err" and "die". In one sentence, this change will print the source filename where the function is defined and exits via function "die". Fixes bug #1207658 Change-Id: I2aa6642c5cf4cfe781afe278b3dec3e7cba277fa --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 262f70f29f..43ff1a7731 100644 --- a/functions +++ b/functions @@ -113,7 +113,7 @@ function err() { local exitcode=$? errXTRACE=$(set +o | grep xtrace) set +o xtrace - local msg="[ERROR] $0:$1 $2" + local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2" echo $msg 1>&2; if [[ -n ${SCREEN_LOGDIR} ]]; then echo $msg >> "${SCREEN_LOGDIR}/error.log" From d3a18ae1ecc757008ee7686f709209a930d90ab8 Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Fri, 2 Aug 2013 20:58:56 +0900 Subject: [PATCH 0006/4438] Update baremetal to work with the latest DIB Adjust DevStack to the current DIB's naming to kernel/ramdisk. BM_HOST_CURRENT_KERNEL is removed since the kernel is extracted from a diskimage with the ramdisk and the host's kernel is not used. BM_BUILD_DEPLOY_RAMDISK is added to control whether use DIB or not. 
If you set BM_BUILD_DEPLOY_RAMDISK=False, you must BM_DEPLOY_KERNEL and BM_DEPLOY_RAMDISK to point existing deploy kernel/ramdisk. Fixes bug 1207719 Change-Id: I62af0b1942b07ac12665c0ed3619d64c1cccbe1f --- lib/baremetal | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index 145544d40c..8f6c3f1660 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -138,9 +138,12 @@ BM_FLAVOR_ARCH=${BM_FLAVOR_ARCH:-$BM_CPU_ARCH} BM_IMAGE_BUILD_DIR=${BM_IMAGE_BUILD_DIR:-$DEST/diskimage-builder} BM_POSEUR_DIR=${BM_POSEUR_DIR:-$DEST/bm_poseur} -BM_HOST_CURRENT_KERNEL=$(uname -r) -BM_DEPLOY_RAMDISK=${BM_DEPLOY_RAMDISK:-bm-deploy-$BM_HOST_CURRENT_KERNEL-initrd} -BM_DEPLOY_KERNEL=${BM_DEPLOY_KERNEL:-bm-deploy-$BM_HOST_CURRENT_KERNEL-vmlinuz} +# Use DIB to create deploy ramdisk and kernel. +BM_BUILD_DEPLOY_RAMDISK=`trueorfalse True $BM_BUILD_DEPLOY_RAMDISK` +# If not use DIB, these files are used as deploy ramdisk/kernel. +# (The value must be a relative path from $TOP_DIR/files/) +BM_DEPLOY_RAMDISK=${BM_DEPLOY_RAMDISK:-} +BM_DEPLOY_KERNEL=${BM_DEPLOY_KERNEL:-} # If you need to add any extra flavors to the deploy ramdisk image # eg, specific network drivers, specify them here @@ -233,13 +236,13 @@ function configure_baremetal_nova_dirs() { function upload_baremetal_deploy() { token=$1 - if [ ! -e $TOP_DIR/files/$BM_DEPLOY_KERNEL -a -e /boot/vmlinuz-$BM_HOST_CURRENT_KERNEL ]; then - sudo cp /boot/vmlinuz-$BM_HOST_CURRENT_KERNEL $TOP_DIR/files/$BM_DEPLOY_KERNEL - sudo chmod a+r $TOP_DIR/files/$BM_DEPLOY_KERNEL - fi - if [ ! -e $TOP_DIR/files/$BM_DEPLOY_RAMDISK ]; then - $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR deploy \ - -o $TOP_DIR/files/$BM_DEPLOY_RAMDISK -k $BM_HOST_CURRENT_KERNEL + if [ "$BM_BUILD_DEPLOY_RAMDISK" = "True" ]; then + BM_DEPLOY_KERNEL=bm-deploy.kernel + BM_DEPLOY_RAMDISK=bm-deploy.initramfs + if [ ! -e "$TOP_DIR/files/$BM_DEPLOY_KERNEL" -o ! 
-e "$TOP_DIR/files/$BM_DEPLOY_RAMDISK" ]; then + $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR deploy \ + -o $TOP_DIR/files/bm-deploy + fi fi # load them into glance From 3ea28ece4a71b0137050314af0e4f3e55046db11 Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Mon, 5 Aug 2013 12:24:32 +0000 Subject: [PATCH 0007/4438] Correctly setup ML2 mechanism_drivers The ML2 code in devstack was not correctly configuring the mechanism_drivers when asked to do so. This corrects the typo in the variable assignment, and also actually sets these in the plugin configuration file. Fixes bug 1208557 Change-Id: I3746ca099f45d44dcf1cc2ca1c3726745b8e8a1d --- lib/neutron_plugins/ml2 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index ff49d8e6b8..00bd716309 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -20,7 +20,7 @@ Q_AGENT=${Q_AGENT:-openvswitch} source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent # List of MechanismDrivers to load -Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_PLUGIN_MECHANISM_DRIVERS:-} +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-} # List of Type Drivers to load Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,gre,vxlan} # Default GRE TypeDriver options @@ -92,6 +92,8 @@ function neutron_plugin_configure_service() { # Since we enable the tunnel TypeDrivers, also enable a local_ip iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP + populate_ml2_config mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS /$Q_PLUGIN_CONF_FILE ml2 + populate_ml2_config type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS /$Q_PLUGIN_CONF_FILE ml2 populate_ml2_config $Q_SRV_EXTRA_OPTS /$Q_PLUGIN_CONF_FILE ml2 From cf2d0d3db9bda81a6795d5e57e893fea234b462c Mon Sep 17 00:00:00 2001 From: stack Date: Mon, 5 Aug 2013 04:51:56 -0400 Subject: [PATCH 0008/4438] Add keystoneclient support for cinder. 
Add an ability to ask keystone about users and groups through keystoneclient in cinder. blueprint volume-acl Change-Id: Ice261e9709833d057722b4f13c404df54e10b204 --- lib/cinder | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/cinder b/lib/cinder index ef7e3dc9cc..3472dcd519 100644 --- a/lib/cinder +++ b/lib/cinder @@ -296,6 +296,10 @@ function configure_cinder() { -e 's/snapshot_autoextend_percent =.*/snapshot_autoextend_percent = 20/' \ /etc/lvm/lvm.conf fi + iniset $CINDER_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT + iniset $CINDER_CONF keystone_authtoken admin_user cinder + iniset $CINDER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $CINDER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD } From 1a794a3d9e8ada8a4ac671cba392d6ed53d99e18 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Tue, 6 Aug 2013 15:25:01 +0200 Subject: [PATCH 0009/4438] Show ip address before associating address In order to see is the instance has a fixed ip at the moment. 
Change-Id: I506f2f099a03e8b63f1f2daeb564ed72f1322a68 --- exercises/euca.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index 5b0d1ba493..b8b283a8fb 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -129,7 +129,8 @@ else # Allocate floating address FLOATING_IP=`euca-allocate-address | cut -f2` die_if_not_set $LINENO FLOATING_IP "Failure allocating floating IP" - + # describe all instances at this moment + euca-describe-instances # Associate floating address euca-associate-address -i $INSTANCE $FLOATING_IP || \ die $LINENO "Failure associating address $FLOATING_IP to $INSTANCE" From 32e1603e9581746d0a4020b2db9f5b399c1a26c8 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Mon, 29 Jul 2013 15:51:43 +0100 Subject: [PATCH 0010/4438] Default to xenserver driver if xenserver-core is installed Fixes bug 1209205 Change-Id: I63085cc87610a59fc48e519e4351c9233b3961be --- stackrc | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/stackrc b/stackrc index 1e08d1614d..c81906ac8c 100644 --- a/stackrc +++ b/stackrc @@ -180,8 +180,11 @@ SPICE_BRANCH=${SPICE_BRANCH:-master} # Nova hypervisor configuration. We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can -# also install an **LXC**, **OpenVZ** or **XenAPI** based system. -VIRT_DRIVER=${VIRT_DRIVER:-libvirt} +# also install an **LXC**, **OpenVZ** or **XenAPI** based system. If xenserver-core +# is installed, the default will be XenAPI +DEFAULT_VIRT_DRIVER=libvirt +is_package_installed xenserver-core && DEFAULT_VIRT_DRIVER=xenserver +VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER} case "$VIRT_DRIVER" in libvirt) LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} From 389b3a02335887a3d6dbc73b0d0b8476a0f69c33 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Thu, 1 Aug 2013 10:44:09 +1200 Subject: [PATCH 0011/4438] Support heat in standalone mode. 
The following localrc will launch only heat in standalone mode and allow it to provision within the openstack specified by the configured keystone endpoint: HEAT_STANDALONE=True ENABLED_SERVICES=rabbit,mysql,heat,h-api,h-api-cfn,h-api-cw,h-eng KEYSTONE_SERVICE_HOST=... KEYSTONE_AUTH_HOST=... Change-Id: I0d8a541fc9d592577423b074c789829f8b8d6702 --- README.md | 17 +++++++++++++++++ lib/heat | 6 ++++-- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 46d3f96a9f..95c90bc924 100644 --- a/README.md +++ b/README.md @@ -181,6 +181,23 @@ The above will default in devstack to using the OVS on each compute host. To cha Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS VLAN TypeDriver options. Defaults to none. Q_AGENT_EXTRA_AGENT_OPTS Extra configuration options to pass to the OVS or LinuxBridge Agent. +# Heat + +Heat is disabled by default. To enable it you'll need the following settings +in your `localrc` : + + enable_service heat h-api h-api-cfn h-api-cw h-eng + +Heat can also run in standalone mode, and be configured to orchestrate +on an external OpenStack cloud. To launch only Heat in standalone mode +you'll need the following settings in your `localrc` : + + disable_all_services + enable_service rabbit mysql heat h-api h-api-cfn h-api-cw h-eng + HEAT_STANDALONE=True + KEYSTONE_SERVICE_HOST=... + KEYSTONE_AUTH_HOST=... 
+ # Tempest If tempest has been successfully configured, a basic set of smoke tests can be run as follows: diff --git a/lib/heat b/lib/heat index 85177738dc..1b715f2b55 100644 --- a/lib/heat +++ b/lib/heat @@ -30,7 +30,7 @@ set +o xtrace HEAT_DIR=$DEST/heat HEATCLIENT_DIR=$DEST/python-heatclient HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat} - +HEAT_STANDALONE=`trueorfalse False $HEAT_STANDALONE` # Functions # --------- @@ -83,6 +83,7 @@ function configure_heat() { iniset $HEAT_API_CFN_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cfn iniset $HEAT_API_CFN_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 iniset $HEAT_API_CFN_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens + [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_API_CFN_CONF paste_deploy flavor standalone iniset_rpc_backend heat $HEAT_API_CFN_CONF DEFAULT @@ -104,7 +105,7 @@ function configure_heat() { iniset $HEAT_API_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api iniset $HEAT_API_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 iniset $HEAT_API_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens - + [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_API_CONF paste_deploy flavor standalone iniset_rpc_backend heat $HEAT_API_CONF DEFAULT @@ -142,6 +143,7 @@ function configure_heat() { iniset $HEAT_API_CW_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cloudwatch iniset $HEAT_API_CW_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 iniset $HEAT_API_CW_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens + [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_API_CW_CONF 
paste_deploy flavor standalone iniset_rpc_backend heat $HEAT_API_CW_CONF DEFAULT From 62d1d698a0c1459e2519938259175cfed86f4a55 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 1 Aug 2013 17:40:40 -0500 Subject: [PATCH 0012/4438] Add tools/install_pip.sh Install a known working recent version of pip that handles installation dependencies more correctly than before. Extract to a separate script so it can be used apart from stack.sh. * Install distro setuptools if it not already present * Install pip from source tarball as get-pip.py proved to be unreliable * Remove python-distribute and python-pip from all prereq files, move python-setuptools to 'general' * Remove the earlier unfubar_setuptppls() call that attenpted to fix this * Only update requirements.txt when no changes in repo Tested on Precise, F18 and CentOS6. * Fedora and RHEL allow pip to install packages ON TOP OF RPM-installed packages. THIS IS BROKEN. And is one reason we have to be so picky about order and so forth. Change-Id: Ibb4b42119dc2e51577c77bbbbffb110863e5324d --- files/apts/general | 2 +- files/apts/keystone | 1 - files/apts/ryu | 1 - files/apts/swift | 1 - files/rpms-suse/general | 2 +- files/rpms-suse/keystone | 2 - files/rpms-suse/ryu | 2 - files/rpms-suse/swift | 2 - files/rpms/general | 2 +- files/rpms/keystone | 3 +- files/rpms/ryu | 1 - files/rpms/swift | 1 - functions | 7 ++- stack.sh | 24 ++++---- tools/install_pip.sh | 118 +++++++++++++++++++++++++++++++++++++++ 15 files changed, 139 insertions(+), 30 deletions(-) create mode 100755 tools/install_pip.sh diff --git a/files/apts/general b/files/apts/general index fdf8e20ad5..fcf0b5b06e 100644 --- a/files/apts/general +++ b/files/apts/general @@ -1,6 +1,6 @@ bridge-utils pylint -python-pip +python-setuptools screen unzip wget diff --git a/files/apts/keystone b/files/apts/keystone index c98409faaf..564921b78b 100644 --- a/files/apts/keystone +++ b/files/apts/keystone @@ -1,4 +1,3 @@ -python-setuptools python-dev python-lxml 
python-pastescript diff --git a/files/apts/ryu b/files/apts/ryu index 4a4fc523b5..e8ed926c1e 100644 --- a/files/apts/ryu +++ b/files/apts/ryu @@ -1,4 +1,3 @@ -python-setuptools python-gevent python-gflags python-netifaces diff --git a/files/apts/swift b/files/apts/swift index 1c283cf6f0..37d5bc049e 100644 --- a/files/apts/swift +++ b/files/apts/swift @@ -10,7 +10,6 @@ python-greenlet python-netifaces python-nose python-pastedeploy -python-setuptools python-simplejson python-webob python-xattr diff --git a/files/rpms-suse/general b/files/rpms-suse/general index f28267c044..355af885d3 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -6,8 +6,8 @@ iputils openssh openssl psmisc +python-setuptools # instead of python-distribute; dist:sle11sp2 python-cmd2 # dist:opensuse-12.3 -python-pip python-pylint python-unittest2 python-virtualenv diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone index 7d9a7bfe9b..403d82f926 100644 --- a/files/rpms-suse/keystone +++ b/files/rpms-suse/keystone @@ -7,8 +7,6 @@ python-Routes python-SQLAlchemy python-WebOb python-devel -python-distribute -python-setuptools # instead of python-distribute; dist:sle11sp2 python-greenlet python-lxml python-mysql diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu index 90b43a47d9..3797b6cb44 100644 --- a/files/rpms-suse/ryu +++ b/files/rpms-suse/ryu @@ -1,5 +1,3 @@ -python-distribute -python-setuptools # instead of python-distribute; dist:sle11sp2 python-Sphinx python-gevent python-netifaces diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift index db379bbcdf..f3c95aad98 100644 --- a/files/rpms-suse/swift +++ b/files/rpms-suse/swift @@ -6,8 +6,6 @@ python-WebOb python-configobj python-coverage python-devel -python-distribute -python-setuptools # instead of python-distribute; dist:sle11sp2 python-eventlet python-greenlet python-netifaces diff --git a/files/rpms/general b/files/rpms/general index 9fa305c992..2db31d1db0 100644 --- a/files/rpms/general +++ 
b/files/rpms/general @@ -11,7 +11,7 @@ libxml2-devel # dist:rhel6 [2] libxslt-devel # dist:rhel6 [2] psmisc pylint -python-pip +python-setuptools python-prettytable # dist:rhel6 [1] python-unittest2 python-virtualenv diff --git a/files/rpms/keystone b/files/rpms/keystone index 33a4f47ccf..52dbf477d8 100644 --- a/files/rpms/keystone +++ b/files/rpms/keystone @@ -4,10 +4,9 @@ python-paste #dist:f16,f17,f18,f19 python-paste-deploy #dist:f16,f17,f18,f19 python-paste-script #dist:f16,f17,f18,f19 python-routes -python-setuptools #dist:f16,f17,f18,f19 python-sqlalchemy python-sqlite2 python-webob sqlite -# Deps installed via pip for RHEL \ No newline at end of file +# Deps installed via pip for RHEL diff --git a/files/rpms/ryu b/files/rpms/ryu index 0f62f9fc1f..e8ed926c1e 100644 --- a/files/rpms/ryu +++ b/files/rpms/ryu @@ -1,5 +1,4 @@ python-gevent python-gflags python-netifaces -python-setuptools #dist:f16,f17,f18,f19 python-sphinx diff --git a/files/rpms/swift b/files/rpms/swift index 2cc4a0bf39..b137f30dce 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -10,7 +10,6 @@ python-greenlet python-netifaces python-nose python-paste-deploy # dist:f16,f17,f18,f19 -python-setuptools # dist:f16,f17,f18,f19 python-simplejson python-webob pyxattr diff --git a/functions b/functions index fe37e4c3de..14ed1801c1 100644 --- a/functions +++ b/functions @@ -1140,8 +1140,11 @@ function setup_develop() { echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" - (cd $REQUIREMENTS_DIR; \ - $SUDO_CMD python update.py $project_dir) + # Don't update repo if local changes exist + if (cd $project_dir && git diff --quiet); then + (cd $REQUIREMENTS_DIR; \ + $SUDO_CMD python update.py $project_dir) + fi pip_install -e $project_dir # ensure that further actions can do things like setup.py sdist diff --git a/stack.sh b/stack.sh index c2e6fe4626..36f427f849 100755 --- a/stack.sh +++ b/stack.sh @@ -578,18 +578,8 @@ set -o xtrace echo_summary "Installing package prerequisites" 
source $TOP_DIR/tools/install_prereqs.sh -install_rpc_backend - -if is_service_enabled $DATABASE_BACKENDS; then - install_database -fi - -if is_service_enabled neutron; then - install_neutron_agent_packages -fi - -# Unbreak the giant mess that is the current state of setuptools -unfubar_setuptools +# Configure an appropriate python environment +$TOP_DIR/tools/install_pip.sh # System-specific preconfigure # ============================ @@ -642,6 +632,16 @@ if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then sudo ln -sf /usr/bin/nosetests1.1 /usr/local/bin/nosetests fi +install_rpc_backend + +if is_service_enabled $DATABASE_BACKENDS; then + install_database +fi + +if is_service_enabled neutron; then + install_neutron_agent_packages +fi + TRACK_DEPENDS=${TRACK_DEPENDS:-False} # Install python packages into a virtualenv so that we can track them diff --git a/tools/install_pip.sh b/tools/install_pip.sh new file mode 100755 index 0000000000..0ea8f536f6 --- /dev/null +++ b/tools/install_pip.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash + +# **install_pip.sh** + +# install_pip.sh [--pip-version ] [--use-get-pip] [--setuptools] [--force] +# +# Update pip and friends to a known common version + +# Assumptions: +# - currently we try to leave the system setuptools alone, install +# the system package if it is not already present +# - update pip to $INSTALL_PIP_VERSION + +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=`cd $TOOLS_DIR/..; pwd` + +# Change dir to top of devstack +cd $TOP_DIR + +# Import common functions +source $TOP_DIR/functions + +FILES=$TOP_DIR/files + +# Handle arguments + +INSTALL_PIP_VERSION=${INSTALL_PIP_VERSION:-"1.4"} +while [[ -n "$1" ]]; do + case $1 in + --force) + FORCE=1 + ;; + --pip-version) + INSTALL_PIP_VERSION="$2" + shift + ;; + --setuptools) + SETUPTOOLS=1 + ;; + --use-get-pip) + USE_GET_PIP=1; + ;; + esac + shift +done + +SETUPTOOLS_EZ_SETUP_URL=https://bitbucket.org/pypa/setuptools/raw/bootstrap/ez_setup.py 
+PIP_GET_PIP_URL=https://raw.github.com/pypa/pip/master/contrib/get-pip.py +PIP_TAR_URL=https://pypi.python.org/packages/source/p/pip/pip-$INSTALL_PIP_VERSION.tar.gz + +GetDistro +echo "Distro: $DISTRO" + +function get_versions() { + PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null) + if [[ -n $PIP ]]; then + DISTRIBUTE_VERSION=$($PIP freeze | grep 'distribute==') + SETUPTOOLS_VERSION=$($PIP freeze | grep 'setuptools==') + PIP_VERSION=$($PIP --version | awk '{ print $2}') + echo "pip: $PIP_VERSION setuptools: $SETUPTOOLS_VERSION distribute: $DISTRIBUTE_VERSION" + fi +} + +function setuptools_ez_setup() { + if [[ ! -r $FILES/ez_setup.py ]]; then + (cd $FILES; \ + curl -OR $SETUPTOOLS_EZ_SETUP_URL; \ + ) + fi + sudo python $FILES/ez_setup.py +} + +function install_get_pip() { + if [[ ! -r $FILES/get-pip.py ]]; then + (cd $FILES; \ + curl $PIP_GET_PIP_URL; \ + ) + fi + sudo python $FILES/get-pip.py +} + +function install_pip_tarball() { + curl -O $PIP_TAR_URL + tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz + cd pip-$INSTALL_PIP_VERSION + sudo python setup.py install +} + +# Show starting versions +get_versions + +# Do setuptools +if [[ -n "$SETUPTOOLS" ]]; then + # We want it from source + uninstall_package python-setuptools + setuptools_ez_setup +else + # See about installing the distro setuptools + if ! python -c "import setuptools"; then + install_package python-setuptools + fi +fi + +# Do pip +if [[ -z $PIP || "$PIP_VERSION" != "$INSTALL_PIP_VERSION" || -n $FORCE ]]; then + + # Eradicate any and all system packages + uninstall_package python-pip + + if [[ -n "$USE_GET_PIP" ]]; then + install_get_pip + else + install_pip_tarball + fi + + get_versions +fi From b3862f98718317042dd48871d50da1e5255c0329 Mon Sep 17 00:00:00 2001 From: Mike Perez Date: Tue, 12 Feb 2013 02:16:41 -0800 Subject: [PATCH 0013/4438] Add Cinder V2 API to keystone catalog Support both SQL and templated keystone backend. This will add an additional endpoint to go with v1. 
Change-Id: I19168d55c2ffad2b1cd668b6c1341dc8e49e9c1f --- files/default_catalog.templates | 6 ++++++ lib/cinder | 12 ++++++++++++ 2 files changed, 18 insertions(+) diff --git a/files/default_catalog.templates b/files/default_catalog.templates index 1ecf890241..277904a8e3 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -24,6 +24,12 @@ catalog.RegionOne.volume.internalURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id catalog.RegionOne.volume.name = Volume Service +catalog.RegionOne.volumev2.publicURL = http://%SERVICE_HOST%:8776/v2/$(tenant_id)s +catalog.RegionOne.volumev2.adminURL = http://%SERVICE_HOST%:8776/v2/$(tenant_id)s +catalog.RegionOne.volumev2.internalURL = http://%SERVICE_HOST%:8776/v2/$(tenant_id)s +catalog.RegionOne.volumev2.name = Volume Service V2 + + catalog.RegionOne.ec2.publicURL = http://%SERVICE_HOST%:8773/services/Cloud catalog.RegionOne.ec2.adminURL = http://%SERVICE_HOST%:8773/services/Admin catalog.RegionOne.ec2.internalURL = http://%SERVICE_HOST%:8773/services/Cloud diff --git a/lib/cinder b/lib/cinder index 3472dcd519..2bdc29b0ae 100644 --- a/lib/cinder +++ b/lib/cinder @@ -339,6 +339,18 @@ create_cinder_accounts() { --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" + CINDER_V2_SERVICE=$(keystone service-create \ + --name=cinder \ + --type=volumev2 \ + --description="Cinder Volume Service V2" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $CINDER_V2_SERVICE \ + --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \ + --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \ + --internalurl 
"$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" + fi fi } From 2bfbc779c7254604d666edca87a7a582b2c7ac40 Mon Sep 17 00:00:00 2001 From: Gordon Chung Date: Fri, 9 Aug 2013 10:55:12 -0400 Subject: [PATCH 0014/4438] Cinder configuration is not set up for Ceilometer enable cinder notifications when ceilometer is enabled Change-Id: I55809f1cef35aca90f8513a73df1417dcf08098d Fixes:Bug1210269 --- lib/cinder | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/cinder b/lib/cinder index 3472dcd519..14950c7f4d 100644 --- a/lib/cinder +++ b/lib/cinder @@ -234,6 +234,10 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL + if is_service_enabled ceilometer; then + iniset $CINDER_CONF DEFAULT notification_driver "cinder.openstack.common.notifier.rpc_notifier" + fi + if is_service_enabled tls-proxy; then # Set the service port for a proxy to take the original iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT From 385152cd890affbf1d1526a3fb14abe71b3d3ac6 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Fri, 9 Aug 2013 11:13:28 -0400 Subject: [PATCH 0015/4438] Enable debug logging on tempest With tempest moving to testr (serially currently) the log level is no longer defaulting to debug as it did with nose. To get the same level of verbosity in the logging as when running with nose this commit sets the debug flag on tempest. 
Change-Id: I6acd57be0f8188d31825d88471ba9883ebb30519 --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index aaa7281a98..b97f0d86a9 100644 --- a/lib/tempest +++ b/lib/tempest @@ -201,6 +201,7 @@ function configure_tempest() { mkdir -p $TEMPEST_STATE_PATH iniset $TEMPEST_CONF DEFAULT use_stderr False iniset $TEMPEST_CONF DEFAULT log_file tempest.log + iniset $TEMPEST_CONF DEFAULT debug True # Timeouts iniset $TEMPEST_CONF compute build_timeout $BUILD_TIMEOUT From db5fadb5cb768820df54fc3d1c7428a57b511582 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 9 Aug 2013 13:41:33 -0400 Subject: [PATCH 0016/4438] cleanup potentially installed older oslo.config If the user had oslo.config installed prior to us setting up the oslo.config out of git they can get themselves into this very funny situation where pip doesn't see oslo.config 1.1.x, however some packages might. This manifests itself as a user error trying to start nova-api which uses DeprecatedOption, not in oslo.config 1.1.x Because of the funny state pip is in, you can't uninstall oslo.config. So in these situations, if we see old oslo.config in the filesystem, pip install / uninstall it to ensure that everyone ends up using the git version instead. To reduce the amount of user confusion, do this on every install_oslo for a while, which we can purge after Havana ships. 
Change-Id: If92073be5a431840701c952a194e63a7c452c9ca --- clean.sh | 1 + lib/oslo | 15 +++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/clean.sh b/clean.sh index 493c449fca..f7d15dfe4e 100755 --- a/clean.sh +++ b/clean.sh @@ -56,6 +56,7 @@ if [[ -n "$SESSION" ]]; then fi # Clean projects +cleanup_oslo cleanup_cinder cleanup_glance cleanup_keystone diff --git a/lib/oslo b/lib/oslo index 1eb13dbf3d..de5ec4e83e 100644 --- a/lib/oslo +++ b/lib/oslo @@ -27,6 +27,10 @@ OSLOMSG_DIR=$DEST/oslo.messaging # install_oslo() - Collect source and prepare function install_oslo() { + # TODO(sdague): remove this once we get to Icehouse, this just makes + # for a smoother transition of existing users. + cleanup_oslo + git_clone $OSLOCFG_REPO $OSLOCFG_DIR $OSLOCFG_BRANCH setup_develop $OSLOCFG_DIR @@ -34,6 +38,17 @@ function install_oslo() { setup_develop $OSLOMSG_DIR } +# cleanup_oslo() - purge possibly old versions of oslo +function cleanup_oslo() { + # this means we've got an old olso installed, lets get rid of it + if find /usr | grep oslo.config | grep -v oslo.config.egg-link > /dev/null; then + echo "Found old oslo.config... removing to ensure consistency" + local PIP_CMD=$(get_pip_command) + pip_install olso.config + sudo $PIP_CMD uninstall -y olso.config + fi +} + # Restore xtrace $XTRACE From 376b6316608fe72bc4a0bd997e1c94f76b086588 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Mon, 29 Jul 2013 13:10:25 +0100 Subject: [PATCH 0017/4438] Force $DEST to have wider permissions This is particularly useful in the case where we create the home directory above and the permissions are too strict. Other users, such as the apache user, need read/execute for this directory. 
Change-Id: I908d993dbcd863b482030afcc04e5e7b9f4cffa1 --- stack.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stack.sh b/stack.sh index 880529d5af..d4c0eab608 100755 --- a/stack.sh +++ b/stack.sh @@ -234,8 +234,10 @@ else fi # Create the destination directory and ensure it is writable by the user +# and read/executable by everybody for daemons (e.g. apache run for horizon) sudo mkdir -p $DEST sudo chown -R $STACK_USER $DEST +chmod 0755 $DEST # a basic test for $DEST path permissions (fatal on error unless skipped) check_path_perm_sanity ${DEST} From 4b600898743a19f18e83c938eb15744bb2dc13c1 Mon Sep 17 00:00:00 2001 From: John Griffith Date: Sat, 10 Aug 2013 17:48:07 +0000 Subject: [PATCH 0018/4438] Add some missing pkgs to Cinder install Added python-dev even though it's picked up by other projects already, just to be explicit. Also added libpq-dev, this way users can just run "sudo pip install -r test-requirements.txt" and perform everything in run_tests.sh without the need for venv. 
Change-Id: I3953032ac40ef78fc6f67d77539e13539fbbb2ac --- files/apts/cinder | 2 ++ files/rpms-suse/cinder | 2 ++ files/rpms/cinder | 2 ++ 3 files changed, 6 insertions(+) diff --git a/files/apts/cinder b/files/apts/cinder index c45b97f5a2..32cb3a0039 100644 --- a/files/apts/cinder +++ b/files/apts/cinder @@ -1,3 +1,5 @@ tgt lvm2 qemu-utils +libpq-dev +python-dev diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder index 8f4a5a7998..49e2cb8249 100644 --- a/files/rpms-suse/cinder +++ b/files/rpms-suse/cinder @@ -1,3 +1,5 @@ lvm2 tgt qemu-tools +python-devel +postgresql-devel diff --git a/files/rpms/cinder b/files/rpms/cinder index 19dedffe91..699f2fc22c 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -1,3 +1,5 @@ lvm2 scsi-target-utils qemu-img +python-devel +postgresql-devel From 9acc12a3921a261c7ae7a1902871183a6a5b64da Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 9 Aug 2013 15:09:31 -0500 Subject: [PATCH 0019/4438] More Python package fixes * Add tools/fixup_stuff.sh to fix prettytable and httplib2 install with pip 1.4+ * Cache downloads properly in tools/install_pip.sh Change-Id: I482590cb91f7a10c1436bc9015afd572ac1cc73e --- stack.sh | 4 ++++ tools/fixup_stuff.sh | 43 +++++++++++++++++++++++++++++++++++++++++++ tools/install_pip.sh | 10 ++++++---- 3 files changed, 53 insertions(+), 4 deletions(-) create mode 100755 tools/fixup_stuff.sh diff --git a/stack.sh b/stack.sh index 22a23c81d3..aca49d0ae9 100755 --- a/stack.sh +++ b/stack.sh @@ -581,6 +581,10 @@ source $TOP_DIR/tools/install_prereqs.sh # Configure an appropriate python environment $TOP_DIR/tools/install_pip.sh +# Do the ugly hacks for borken packages and distros +$TOP_DIR/tools/fixup_stuff.sh + + # System-specific preconfigure # ============================ diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh new file mode 100755 index 0000000000..60d0f468e0 --- /dev/null +++ b/tools/fixup_stuff.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +# **fixup_stuff.sh** + +# 
fixup_stuff.sh +# +# All distro and package specific hacks go in here +# - prettytable 0.7.2 permissions are 600 in the package and +# pip 1.4 doesn't fix it (1.3 did) +# - httplib2 0.8 permissions are 600 in the package and +# pip 1.4 doesn't fix it (1.3 did) + +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=`cd $TOOLS_DIR/..; pwd` + +# Change dir to top of devstack +cd $TOP_DIR + +# Import common functions +source $TOP_DIR/functions + +FILES=$TOP_DIR/files + +# Pre-install affected packages so we can fix the permissions +sudo pip install prettytable +sudo pip install httplib2 + +SITE_DIRS=$(python -c "import site; import os; print os.linesep.join(site.getsitepackages())") +for dir in $SITE_DIRS; do + + # Fix prettytable 0.7.2 permissions + if [[ -r $dir/prettytable.py ]]; then + sudo chmod +r $dir/prettytable-0.7.2*/* + fi + + # Fix httplib2 0.8 permissions + httplib_dir=httplib2-0.8.egg-info + if [[ -d $dir/$httplib_dir ]]; then + sudo chmod +r $dir/$httplib_dir/* + fi + +done diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 0ea8f536f6..6e3e9d2104 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -81,10 +81,12 @@ function install_get_pip() { } function install_pip_tarball() { - curl -O $PIP_TAR_URL - tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz - cd pip-$INSTALL_PIP_VERSION - sudo python setup.py install + (cd $FILES; \ + curl -O $PIP_TAR_URL; \ + tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz; \ + cd pip-$INSTALL_PIP_VERSION; \ + sudo python setup.py install; \ + ) } # Show starting versions From dace92f557a3c07a80bb9a5d9e480810d81611e9 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sat, 10 Aug 2013 23:49:47 -0300 Subject: [PATCH 0020/4438] Stop doing special things with setuptools pip 1.4 can handle the distribute/setuptools upgrade sequencing appropriate. So it turns out all we need to upgrade is pip, and then the rest will fall in to place. This will still not fix the packages vs. 
pip interactions, but we don't to muck with the system setuptools packages at all. Change-Id: I99220ccc190798c3eb77bb2361abc6606bd546b4 --- tools/install_pip.sh | 52 ++++++++++---------------------------------- 1 file changed, 11 insertions(+), 41 deletions(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 0ea8f536f6..64cc20052e 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -2,13 +2,11 @@ # **install_pip.sh** -# install_pip.sh [--pip-version ] [--use-get-pip] [--setuptools] [--force] +# install_pip.sh [--pip-version ] [--use-get-pip] [--force] # # Update pip and friends to a known common version # Assumptions: -# - currently we try to leave the system setuptools alone, install -# the system package if it is not already present # - update pip to $INSTALL_PIP_VERSION # Keep track of the current directory @@ -25,7 +23,7 @@ FILES=$TOP_DIR/files # Handle arguments -INSTALL_PIP_VERSION=${INSTALL_PIP_VERSION:-"1.4"} +INSTALL_PIP_VERSION=${INSTALL_PIP_VERSION:-"1.4.1"} while [[ -n "$1" ]]; do case $1 in --force) @@ -35,9 +33,6 @@ while [[ -n "$1" ]]; do INSTALL_PIP_VERSION="$2" shift ;; - --setuptools) - SETUPTOOLS=1 - ;; --use-get-pip) USE_GET_PIP=1; ;; @@ -45,7 +40,6 @@ while [[ -n "$1" ]]; do shift done -SETUPTOOLS_EZ_SETUP_URL=https://bitbucket.org/pypa/setuptools/raw/bootstrap/ez_setup.py PIP_GET_PIP_URL=https://raw.github.com/pypa/pip/master/contrib/get-pip.py PIP_TAR_URL=https://pypi.python.org/packages/source/p/pip/pip-$INSTALL_PIP_VERSION.tar.gz @@ -55,21 +49,11 @@ echo "Distro: $DISTRO" function get_versions() { PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null) if [[ -n $PIP ]]; then - DISTRIBUTE_VERSION=$($PIP freeze | grep 'distribute==') - SETUPTOOLS_VERSION=$($PIP freeze | grep 'setuptools==') PIP_VERSION=$($PIP --version | awk '{ print $2}') - echo "pip: $PIP_VERSION setuptools: $SETUPTOOLS_VERSION distribute: $DISTRIBUTE_VERSION" + echo "pip: $PIP_VERSION" fi } -function setuptools_ez_setup() { - if [[ ! 
-r $FILES/ez_setup.py ]]; then - (cd $FILES; \ - curl -OR $SETUPTOOLS_EZ_SETUP_URL; \ - ) - fi - sudo python $FILES/ez_setup.py -} function install_get_pip() { if [[ ! -r $FILES/get-pip.py ]]; then @@ -90,29 +74,15 @@ function install_pip_tarball() { # Show starting versions get_versions -# Do setuptools -if [[ -n "$SETUPTOOLS" ]]; then - # We want it from source - uninstall_package python-setuptools - setuptools_ez_setup -else - # See about installing the distro setuptools - if ! python -c "import setuptools"; then - install_package python-setuptools - fi -fi - # Do pip -if [[ -z $PIP || "$PIP_VERSION" != "$INSTALL_PIP_VERSION" || -n $FORCE ]]; then - # Eradicate any and all system packages - uninstall_package python-pip - - if [[ -n "$USE_GET_PIP" ]]; then - install_get_pip - else - install_pip_tarball - fi +# Eradicate any and all system packages +uninstall_package python-pip - get_versions +if [[ -n "$USE_GET_PIP" ]]; then + install_get_pip +else + install_pip_tarball fi + +get_versions From d5cccad2f0655b59e1db9219458f8bc35edb9ad1 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Fri, 19 Jul 2013 10:34:24 +1200 Subject: [PATCH 0021/4438] Generate heat images for tempest tests This requires HEAT_CREATE_TEST_IMAGE to be set for any images to be created. If the file (files/fedora-vm-heat-cfntools-tempest.qcow2) already exists then disk-image-create will not be called, and the existing image file will be registered with glance. 
This is most likely to happen in the following scenarios: - a second run of stack.sh - the image has been pre-built elsewhere (such as during devstack-gate image building) Change-Id: I276573a20927e72f2cb68784f655c1ba1913ae8a --- lib/heat | 15 +++++++++++++++ lib/tempest | 7 +++++++ 2 files changed, 22 insertions(+) diff --git a/lib/heat b/lib/heat index 1b715f2b55..92b4e50ee2 100644 --- a/lib/heat +++ b/lib/heat @@ -197,6 +197,21 @@ function stop_heat() { done } +function disk_image_create { + local elements_path=$1 + local elements=$2 + local arch=$3 + local output=$TOP_DIR/files/$4 + if [[ -f "$output.qcow2" ]]; + then + echo "Image file already exists: $output_file" + else + ELEMENTS_PATH=$elements_path disk-image-create \ + $elements -a $arch -o $output + fi + # upload with fake URL so that image in $TOP_DIR/files is used + upload_image "http://localhost/$output.qcow2" $TOKEN +} # Restore xtrace $XTRACE diff --git a/lib/tempest b/lib/tempest index aaa7281a98..5142f24a2e 100644 --- a/lib/tempest +++ b/lib/tempest @@ -24,6 +24,7 @@ # ``DEFAULT_INSTANCE_TYPE`` # ``DEFAULT_INSTANCE_USER`` # ``CINDER_MULTI_LVM_BACKEND`` +# ``HEAT_CREATE_TEST_IMAGE`` # ``stack.sh`` calls the entry points in this order: # # install_tempest @@ -271,6 +272,12 @@ function configure_tempest() { iniset $TEMPEST_CONF boto http_socket_timeout 30 iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} + # Orchestration test image + if [ $HEAT_CREATE_TEST_IMAGE == "True" ]; then + disk_image_create /usr/share/tripleo-image-elements "vm fedora heat-cfntools" "i386" "fedora-vm-heat-cfntools-tempest" + iniset $TEMPEST_CONF orchestration image_ref "fedora-vm-heat-cfntools-tempest" + fi + # Scenario iniset $TEMPEST_CONF scenario img_dir "$FILES/images/cirros-0.3.1-x86_64-uec" From 556ffe402252b8e993f7849a2d7e959adc8c6291 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Tue, 6 Aug 2013 16:42:38 +1200 Subject: [PATCH 0022/4438] Colorize heat engine log Change-Id: 
If6ffb234e360e8a579eb8e1e7baedb90354b10ae --- lib/heat | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/heat b/lib/heat index 92b4e50ee2..568f4d00ca 100644 --- a/lib/heat +++ b/lib/heat @@ -125,6 +125,14 @@ function configure_heat() { iniset_rpc_backend heat $HEAT_ENGINE_CONF DEFAULT + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + # Add color to logging output + iniset $HEAT_ENGINE_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $HEAT_ENGINE_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $HEAT_ENGINE_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" + iniset $HEAT_ENGINE_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + fi + # Cloudwatch API HEAT_API_CW_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch.conf cp $HEAT_DIR/etc/heat/heat-api-cloudwatch.conf $HEAT_API_CW_CONF From 99405a45aa3930a9b695d9a1c1dabf0d967e48ad Mon Sep 17 00:00:00 2001 From: Roman Gorodeckij Date: Wed, 7 Aug 2013 09:20:36 -0400 Subject: [PATCH 0023/4438] Pip install fails because of --use-mirrors parameter Having --use-mirrors parameter in pip commands causes pip to hang on some distros. Pypi uses CDN for long time already, so there's no point to keep this parameter no more. Wipe PIP_USE_MIRRORS out of the "function" file. 
Change-Id: I70adaf6591834af2482e09eb7f8f9f60df8e7692 Closes-Bug: #1069309 --- functions | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/functions b/functions index e9c60615e6..087a0ea844 100644 --- a/functions +++ b/functions @@ -944,13 +944,9 @@ function pip_install { CMD_PIP=$(get_pip_command) fi - if is_fedora && [[ $DISTRO =~ (rhel6) ]]; then - # RHEL6 pip by default doesn't have this (was introduced - # around 0.8.1 or so) - PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False} - else - PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-True} - fi + # Mirror option not needed anymore because pypi has CDN available, + # but it's useful in certain circumstances + PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False} if [[ "$PIP_USE_MIRRORS" != "False" ]]; then PIP_MIRROR_OPT="--use-mirrors" fi From bf10ac55a99d226a81bdbc7e6bd1e85b4f48652d Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Sat, 10 Aug 2013 21:27:54 +0000 Subject: [PATCH 0024/4438] Add auth config for neutron metadata proxy * Without auth config, the proxy will return 500 errors on cloud-init requests, which will cause VM connectivity checks to fail. * A cleaner fix would be for the metadata proxy to reuse the configuration from the keystone_authtoken section of neutron.conf, but I chose the easier route because of a pending switch from REST to RPC communication (RPC won't need the auth config). 
* Fixes bug 1210664 Change-Id: Iaa3c74f5ada2404119c44c8cbdad380eda158f66 --- lib/neutron | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/lib/neutron b/lib/neutron index 306140a4b5..3b8dcf59a6 100644 --- a/lib/neutron +++ b/lib/neutron @@ -577,6 +577,8 @@ function _configure_neutron_metadata_agent() { iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + _neutron_setup_keystone $Q_META_CONF_FILE DEFAULT True True + } function _configure_neutron_lbaas() { @@ -687,6 +689,7 @@ function _neutron_setup_keystone() { local conf_file=$1 local section=$2 local use_auth_url=$3 + local skip_auth_cache=$4 if [[ -n $use_auth_url ]]; then iniset $conf_file $section auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0" else @@ -697,11 +700,13 @@ function _neutron_setup_keystone() { iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME iniset $conf_file $section admin_user $Q_ADMIN_USERNAME iniset $conf_file $section admin_password $SERVICE_PASSWORD - iniset $conf_file $section signing_dir $NEUTRON_AUTH_CACHE_DIR - # Create cache dir - sudo mkdir -p $NEUTRON_AUTH_CACHE_DIR - sudo chown $STACK_USER $NEUTRON_AUTH_CACHE_DIR - rm -f $NEUTRON_AUTH_CACHE_DIR/* + if [[ -z $skip_auth_cache ]]; then + iniset $conf_file $section signing_dir $NEUTRON_AUTH_CACHE_DIR + # Create cache dir + sudo mkdir -p $NEUTRON_AUTH_CACHE_DIR + sudo chown $STACK_USER $NEUTRON_AUTH_CACHE_DIR + rm -f $NEUTRON_AUTH_CACHE_DIR/* + fi } function _neutron_setup_interface_driver() { From 039979424bebc71b94f53f51030eda5e9d2b7734 Mon Sep 17 00:00:00 2001 From: Ben Nemec Date: Sat, 10 Aug 2013 09:56:16 -0500 Subject: [PATCH 0025/4438] Allow disabling of debug logging I find that enabling the debug log level often causes me to miss important error messages due to the sheer volume of information logged. 
This change allows configuration of the debug option in a number of the projects so it can be disabled globally without having to make one-off changes after each re-stack. Note that this does not apply to Keystone or Swift right now. They use a different method to configure their logging level and I'm not as familiar with them so I didn't want to mess with their settings. Change-Id: I185d496543d245a644854c8a37f3359377cb978c --- lib/cinder | 2 +- lib/glance | 6 +++--- lib/heat | 8 ++++---- lib/neutron | 10 +++++----- lib/nova | 2 +- stack.sh | 3 +++ 6 files changed, 17 insertions(+), 14 deletions(-) diff --git a/lib/cinder b/lib/cinder index 14950c7f4d..f49eda15be 100644 --- a/lib/cinder +++ b/lib/cinder @@ -212,7 +212,7 @@ function configure_cinder() { cp $CINDER_DIR/etc/cinder/cinder.conf.sample $CINDER_CONF iniset $CINDER_CONF DEFAULT auth_strategy keystone - iniset $CINDER_CONF DEFAULT debug True + iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $CINDER_CONF DEFAULT verbose True if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then iniset $CINDER_CONF DEFAULT enabled_backends lvmdriver-1,lvmdriver-2 diff --git a/lib/glance b/lib/glance index 583f879555..a18189f474 100644 --- a/lib/glance +++ b/lib/glance @@ -71,7 +71,7 @@ function configure_glance() { # Copy over our glance configurations and update them cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF - iniset $GLANCE_REGISTRY_CONF DEFAULT debug True + iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file local dburl=`database_connection_url glance` iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $dburl @@ -87,7 +87,7 @@ function configure_glance() { iniset $GLANCE_REGISTRY_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/registry cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF - iniset $GLANCE_API_CONF DEFAULT debug True + iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL inicomment 
$GLANCE_API_CONF DEFAULT log_file iniset $GLANCE_API_CONF DEFAULT sql_connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG @@ -123,7 +123,7 @@ function configure_glance() { cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI cp $GLANCE_DIR/etc/glance-cache.conf $GLANCE_CACHE_CONF - iniset $GLANCE_CACHE_CONF DEFAULT debug True + iniset $GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL inicomment $GLANCE_CACHE_CONF DEFAULT log_file iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_CACHE_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ diff --git a/lib/heat b/lib/heat index 1b715f2b55..3c3b2c4a2d 100644 --- a/lib/heat +++ b/lib/heat @@ -68,7 +68,7 @@ function configure_heat() { # Cloudformation API HEAT_API_CFN_CONF=$HEAT_CONF_DIR/heat-api-cfn.conf cp $HEAT_DIR/etc/heat/heat-api-cfn.conf $HEAT_API_CFN_CONF - iniset $HEAT_API_CFN_CONF DEFAULT debug True + iniset $HEAT_API_CFN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL inicomment $HEAT_API_CFN_CONF DEFAULT log_file iniset $HEAT_API_CFN_CONF DEFAULT use_syslog $SYSLOG iniset $HEAT_API_CFN_CONF DEFAULT bind_host $HEAT_API_CFN_HOST @@ -90,7 +90,7 @@ function configure_heat() { # OpenStack API HEAT_API_CONF=$HEAT_CONF_DIR/heat-api.conf cp $HEAT_DIR/etc/heat/heat-api.conf $HEAT_API_CONF - iniset $HEAT_API_CONF DEFAULT debug True + iniset $HEAT_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL inicomment $HEAT_API_CONF DEFAULT log_file iniset $HEAT_API_CONF DEFAULT use_syslog $SYSLOG iniset $HEAT_API_CONF DEFAULT bind_host $HEAT_API_HOST @@ -112,7 +112,7 @@ function configure_heat() { # engine HEAT_ENGINE_CONF=$HEAT_CONF_DIR/heat-engine.conf cp $HEAT_DIR/etc/heat/heat-engine.conf $HEAT_ENGINE_CONF - iniset $HEAT_ENGINE_CONF DEFAULT debug True + iniset $HEAT_ENGINE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL inicomment $HEAT_ENGINE_CONF DEFAULT log_file iniset $HEAT_ENGINE_CONF DEFAULT use_syslog $SYSLOG iniset $HEAT_ENGINE_CONF DEFAULT bind_host $HEAT_ENGINE_HOST @@ 
-128,7 +128,7 @@ function configure_heat() { # Cloudwatch API HEAT_API_CW_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch.conf cp $HEAT_DIR/etc/heat/heat-api-cloudwatch.conf $HEAT_API_CW_CONF - iniset $HEAT_API_CW_CONF DEFAULT debug True + iniset $HEAT_API_CW_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL inicomment $HEAT_API_CW_CONF DEFAULT log_file iniset $HEAT_API_CW_CONF DEFAULT use_syslog $SYSLOG iniset $HEAT_API_CW_CONF DEFAULT bind_host $HEAT_API_CW_HOST diff --git a/lib/neutron b/lib/neutron index 306140a4b5..564315b107 100644 --- a/lib/neutron +++ b/lib/neutron @@ -537,7 +537,7 @@ function _configure_neutron_dhcp_agent() { cp $NEUTRON_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE iniset $Q_DHCP_CONF_FILE DEFAULT verbose True - iniset $Q_DHCP_CONF_FILE DEFAULT debug True + iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" @@ -557,7 +557,7 @@ function _configure_neutron_l3_agent() { cp $NEUTRON_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE iniset $Q_L3_CONF_FILE DEFAULT verbose True - iniset $Q_L3_CONF_FILE DEFAULT debug True + iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" @@ -573,7 +573,7 @@ function _configure_neutron_metadata_agent() { cp $NEUTRON_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE iniset $Q_META_CONF_FILE DEFAULT verbose True - iniset $Q_META_CONF_FILE DEFAULT debug True + iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" @@ -597,7 +597,7 @@ function _configure_neutron_plugin_agent() { # ensure that an agent's configuration can override the default iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND" iniset $NEUTRON_CONF DEFAULT verbose True - iniset 
$NEUTRON_CONF DEFAULT debug True + iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL # Configure agent for plugin neutron_plugin_configure_plugin_agent @@ -620,7 +620,7 @@ function _configure_neutron_service() { fi iniset $NEUTRON_CONF DEFAULT verbose True - iniset $NEUTRON_CONF DEFAULT debug True + iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $NEUTRON_CONF DEFAULT policy_file $Q_POLICY_FILE iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP diff --git a/lib/nova b/lib/nova index 9c38498d8c..842c098624 100644 --- a/lib/nova +++ b/lib/nova @@ -430,7 +430,7 @@ function create_nova_conf() { # (Re)create ``nova.conf`` rm -f $NOVA_CONF iniset $NOVA_CONF DEFAULT verbose "True" - iniset $NOVA_CONF DEFAULT debug "True" + iniset $NOVA_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" iniset $NOVA_CONF DEFAULT auth_strategy "keystone" iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True" iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI" diff --git a/stack.sh b/stack.sh index aca49d0ae9..e2703224a4 100755 --- a/stack.sh +++ b/stack.sh @@ -250,6 +250,9 @@ OFFLINE=`trueorfalse False $OFFLINE` # operation. ERROR_ON_CLONE=`trueorfalse False $ERROR_ON_CLONE` +# Whether to enable the debug log level in OpenStack services +ENABLE_DEBUG_LOG_LEVEL=`trueorfalse True $ENABLE_DEBUG_LOG_LEVEL` + # Destination path for service data DATA_DIR=${DATA_DIR:-${DEST}/data} sudo mkdir -p $DATA_DIR From c325227465e5b31936bbab888d2a282be097d01e Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Tue, 13 Aug 2013 00:32:20 -0700 Subject: [PATCH 0026/4438] VMware: Add cinder support to devstack The patch set adds cinder support to devstack. 
VMware cinder support can be found at: - Nova - https://review.openstack.org/#/c/40245/ - Cinder - https://review.openstack.org/#/c/41600/ Change-Id: I0a05643010ea6cfb6635505accc3dcf411fdd419 --- lib/cinder | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/cinder b/lib/cinder index 14950c7f4d..b7f765b391 100644 --- a/lib/cinder +++ b/lib/cinder @@ -287,6 +287,14 @@ function configure_cinder() { CINDER_GLUSTERFS_SHARES=$(echo $CINDER_GLUSTERFS_SHARES | tr ";" "\n") echo "$CINDER_GLUSTERFS_SHARES" > $CINDER_CONF_DIR/glusterfs_shares fi + elif [ "$CINDER_DRIVER" == "vsphere" ]; then + echo_summary "Using VMware vCenter driver" + iniset $CINDER_CONF DEFAULT enabled_backends vmware + iniset $CINDER_CONF vmware host_ip "$VMWAREAPI_IP" + iniset $CINDER_CONF vmware host_username "$VMWAREAPI_USER" + iniset $CINDER_CONF vmware host_password "$VMWAREAPI_PASSWORD" + iniset $CINDER_CONF vmware cluster_name "$VMWAREAPI_CLUSTER" + iniset $CINDER_CONF vmware volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver" fi if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then From f5dbf8c8ef30c66cd40b07605b4aefa06b3e3c1d Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Tue, 13 Aug 2013 09:02:46 -0700 Subject: [PATCH 0027/4438] Fix option for metadata access in nicira neutron plugin Bug 1211850 Set metadata_mode option rather than enable_metadata_access_network. 
Change-Id: Ia85aba4d0dfb3e7b21937cf15aebc629e3705aed --- lib/neutron_plugins/nicira | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/nicira b/lib/neutron_plugins/nicira index eabc41730d..e9deb64e11 100644 --- a/lib/neutron_plugins/nicira +++ b/lib/neutron_plugins/nicira @@ -90,7 +90,7 @@ function neutron_plugin_configure_service() { iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID Q_L3_ENABLED=True Q_L3_ROUTER_PER_TENANT=True - iniset /$Q_PLUGIN_CONF_FILE nvp enable_metadata_access_network True + iniset /$Q_PLUGIN_CONF_FILE nvp metadata_mode access_network fi if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID From 025fc5e5f5d7f92f0d0bda7032cf1782b029f28a Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Tue, 13 Aug 2013 18:55:33 +0200 Subject: [PATCH 0028/4438] Faster old oslo.config detection Just search in the path where python searches for modules. Let's use python for searching, it knows the exact rules. Change-Id: I659f734c418ab5e56f4956f418af48dfbe054c8a --- lib/oslo | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/oslo b/lib/oslo index de5ec4e83e..449c4de17f 100644 --- a/lib/oslo +++ b/lib/oslo @@ -41,7 +41,7 @@ function install_oslo() { # cleanup_oslo() - purge possibly old versions of oslo function cleanup_oslo() { # this means we've got an old olso installed, lets get rid of it - if find /usr | grep oslo.config | grep -v oslo.config.egg-link > /dev/null; then + if ! python -c 'import oslo.config' 2>/dev/null; then echo "Found old oslo.config... 
removing to ensure consistency" local PIP_CMD=$(get_pip_command) pip_install olso.config From cfb708d9c53e1680f21ef63c1715ca2693b9758d Mon Sep 17 00:00:00 2001 From: Lorin Hochstein Date: Tue, 13 Aug 2013 14:34:18 -0400 Subject: [PATCH 0029/4438] Redirect dpkg -l stderr to /dev/null Fixes bug 1211413 Change-Id: I33a7e1e8fb3755c69ca0570e333e4908cb6f3da4 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index e9c60615e6..6ae650eb8e 100644 --- a/functions +++ b/functions @@ -909,7 +909,7 @@ function is_package_installed() { fi if [[ "$os_PACKAGE" = "deb" ]]; then - dpkg -l "$@" > /dev/null + dpkg -l "$@" > /dev/null 2> /dev/null elif [[ "$os_PACKAGE" = "rpm" ]]; then rpm --quiet -q "$@" else From 4669122dc8e50a3c0cead54e227a5a46508fed50 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Mon, 12 Aug 2013 17:28:50 +0100 Subject: [PATCH 0030/4438] Add XenServer to GetDistro's logic This is primarily to satisfy the expectations of various scripts that os_* variables are defined, and will result in a distro similar to "xs6.1.0-59235p" Fixes bug 1211001 Change-Id: I951e1eb3a5e25f4d8773a11b15cf38157b6492fe --- functions | 5 ++++- tools/xen/install_os_domU.sh | 4 ++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/functions b/functions index e9c60615e6..1d50651891 100644 --- a/functions +++ b/functions @@ -387,8 +387,9 @@ GetOSVersion() { # CentOS release 5.5 (Final) # CentOS Linux release 6.0 (Final) # Fedora release 16 (Verne) + # XenServer release 6.2.0-70446c (xenenterprise) os_CODENAME="" - for r in "Red Hat" CentOS Fedora; do + for r in "Red Hat" CentOS Fedora XenServer; do os_VENDOR=$r if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then ver=`sed -e 's/^.* \(.*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release` @@ -451,6 +452,8 @@ function GetDistro() { elif [[ "$os_VENDOR" =~ (Red Hat) || "$os_VENDOR" =~ (CentOS) ]]; then # Drop the . 
release as we assume it's compatible DISTRO="rhel${os_RELEASE::1}" + elif [[ "$os_VENDOR" =~ (XenServer) ]]; then + DISTRO="xs$os_RELEASE" else # Catch-all for now is Vendor + Release + Update DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE" diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 92b131795b..997644d018 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -29,6 +29,10 @@ THIS_DIR=$(cd $(dirname "$0") && pwd) # xapi functions . $THIS_DIR/functions +# Determine what system we are running on. +# Might not be XenServer if we're using xenserver-core +GetDistro + # # Get Settings # From 6769b166b10272947db77c3f9bfb0d115e8d0a2d Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Mon, 12 Aug 2013 18:18:56 -0700 Subject: [PATCH 0031/4438] Added functions for get a ip on an instance The cause of some gating failure looks like because of getting ip address on instance. However current exercise didn't log the return value. In this commit, we add get_instance_ip function with error hanlding support, and apply it on the execise. Change-Id: I8e17ba68093faafe58a98eb780a032368eea38aa --- exercises/boot_from_volume.sh | 3 ++- exercises/floating_ips.sh | 2 +- exercises/neutron-adv-test.sh | 12 ++++++------ exercises/volumes.sh | 3 ++- functions | 13 +++++++++++++ 5 files changed, 24 insertions(+), 9 deletions(-) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index a3a14eb5e4..36524ede4b 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -174,7 +174,8 @@ if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
nova show $VM_UUID | grep status | g fi # Get the instance IP -IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) +IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME) + die_if_not_set $LINENO IP "Failure retrieving IP address" # Private IPs can be pinged in single node deployments diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index ac65cf7772..f93a727df6 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -132,7 +132,7 @@ if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | g fi # Get the instance IP -IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) +IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME) die_if_not_set $LINENO IP "Failure retrieving IP address" # Private IPs can be pinged in single node deployments diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index 4367e2e3c1..abb29cf333 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -272,12 +272,12 @@ function create_vms { } function ping_ip { - # Test agent connection. Assumes namespaces are disabled, and - # that DHCP is in use, but not L3 - local VM_NAME=$1 - local NET_NAME=$2 - IP=`nova show $VM_NAME | grep 'network' | awk '{print $5}'` - ping_check $NET_NAME $IP $BOOT_TIMEOUT + # Test agent connection. Assumes namespaces are disabled, and + # that DHCP is in use, but not L3 + local VM_NAME=$1 + local NET_NAME=$2 + IP=$(get_instance_ip $VM_NAME $NET_NAME) + ping_check $NET_NAME $IP $BOOT_TIMEOUT } function check_vm { diff --git a/exercises/volumes.sh b/exercises/volumes.sh index b2b391c5d7..028d19b36a 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -135,7 +135,8 @@ if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
nova show $VM_UUID | grep status | g fi # Get the instance IP -IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) +IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME) + die_if_not_set $LINENO IP "Failure retrieving IP address" # Private IPs can be pinged in single node deployments diff --git a/functions b/functions index 087a0ea844..b56df08673 100644 --- a/functions +++ b/functions @@ -1433,6 +1433,19 @@ function _ping_check_novanet() { fi } +# Get ip of instance +function get_instance_ip(){ + local vm_id=$1 + local network_name=$2 + local nova_result="$(nova show $vm_id)" + local ip=$(echo "$nova_result" | grep "$network_name" | get_field 2) + if [[ $ip = "" ]];then + echo "$nova_result" + die $LINENO "[Fail] Coudn't get ipaddress of VM" + exit 1 + fi + echo $ip +} # ssh check From 248a8cce71754b4a16c60bd161ec566098b81305 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Tue, 6 Aug 2013 08:00:06 +0200 Subject: [PATCH 0032/4438] .conf suffix for the horizon config on Fedora On Fedora by default the *.conf imported only from the /etc/httpd/conf.d/. Changing the default config name to horizon.conf with all distribution in order to have a simple logic. 
Change-Id: I08c3e825f697640fd73ac1f9c569f313abc3c04f --- lib/horizon | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/horizon b/lib/horizon index 89bd65901c..a879d1e089 100644 --- a/lib/horizon +++ b/lib/horizon @@ -106,13 +106,13 @@ function init_horizon() { sudo mkdir -p $HORIZON_DIR/.blackhole HORIZON_REQUIRE='' - local horizon_conf=/etc/$APACHE_NAME/$APACHE_CONF_DIR/horizon + local horizon_conf=/etc/$APACHE_NAME/$APACHE_CONF_DIR/horizon.conf if is_ubuntu; then # Clean up the old config name sudo rm -f /etc/apache2/sites-enabled/000-default # Be a good citizen and use the distro tools here sudo touch $horizon_conf - sudo a2ensite horizon + sudo a2ensite horizon.conf # WSGI isn't enabled by default, enable it sudo a2enmod wsgi elif is_fedora; then From c2a4c9238d4004f0271d51a5fc9b66bb94ba3a8f Mon Sep 17 00:00:00 2001 From: Alessio Ababilov Date: Fri, 16 Aug 2013 21:53:22 +0300 Subject: [PATCH 0033/4438] Fix 'olso' typo in lib/oslo This enables commit If92073be5a431840701c952a194e63a7c452c9ca for cleaning up potentially installed older oslo.config. Here are its original details. If the user had oslo.config installed prior to us setting up the oslo.config out of git they can get themselves into this very funny situation where pip doesn't see oslo.config 1.1.x, however some packages might. This manifests itself as a user error trying to start nova-api which uses DeprecatedOption, not in oslo.config 1.1.x Because of the funny state pip is in, you can't uninstall oslo.config. So in these situations, if we see old oslo.config in the filesystem, pip install / uninstall it to ensure that everyone ends up using the git version instead. To reduce the amount of user confusion, do this on every install_oslo for a while, which we can purge after Havana ships. 
Change-Id: I7fa0b70497bf5622f4638da284afe5363a004d3c Fixes: bug #1213089 --- lib/oslo | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/oslo b/lib/oslo index 449c4de17f..f77a4fa941 100644 --- a/lib/oslo +++ b/lib/oslo @@ -40,12 +40,12 @@ function install_oslo() { # cleanup_oslo() - purge possibly old versions of oslo function cleanup_oslo() { - # this means we've got an old olso installed, lets get rid of it + # this means we've got an old oslo installed, lets get rid of it if ! python -c 'import oslo.config' 2>/dev/null; then echo "Found old oslo.config... removing to ensure consistency" local PIP_CMD=$(get_pip_command) - pip_install olso.config - sudo $PIP_CMD uninstall -y olso.config + pip_install oslo.config + sudo $PIP_CMD uninstall -y oslo.config fi } From 41815cdc7bcbd91500f9efad0f4e8d57fa4b284c Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Fri, 16 Aug 2013 14:57:38 -0700 Subject: [PATCH 0034/4438] Echo service start failures. * functions: Previously screen_it would log service start failures by touching a file, this isn't very useful when working with Jenkins. Switch to echo'ing that a service failed to start and pipe that through tee so that we can keep the old behavior of touching a file (note this behavior is slightly modified and the touched file will now have contents). 
Change-Id: I2d3f272b9a65a9d64dbbc01373a02fccf52f56a8 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 37876e0cc2..19cf4ff813 100644 --- a/functions +++ b/functions @@ -1063,7 +1063,7 @@ function screen_it { sleep 1.5 NL=`echo -ne '\015'` - screen -S $SCREEN_NAME -p $1 -X stuff "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" + screen -S $SCREEN_NAME -p $1 -X stuff "$2 || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" else # Spawn directly without screen run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$service.pid From 4f7dccc34729cf15195f80c753e0f8a27c24171c Mon Sep 17 00:00:00 2001 From: joequant Date: Mon, 19 Aug 2013 11:58:25 +0800 Subject: [PATCH 0035/4438] turn out usb_tablet for libvirt This patch turns out usb_tablet for the libvirt nova.conf file. When usb_tablet is turned on, qemu will poll for usb events and this causes CPU usage even when the qemu is idle. Change-Id: I03e260dfd0873b7d15e01c10c206203833d04e73 --- lib/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova b/lib/nova index 842c098624..32a51d3d4d 100644 --- a/lib/nova +++ b/lib/nova @@ -451,6 +451,7 @@ function create_nova_conf() { if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" + iniset $NOVA_CONF DEFAULT use_usb_tablet "False" fi iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF osapi_v3 enabled "True" From 7fb9808e61e9e38eb62a446dee9933d88a6dd086 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 19 Aug 2013 16:16:54 +0200 Subject: [PATCH 0036/4438] Remove useless sources.list Change-Id: I136b568f04f1de35556aa7c3d546c44402254eef --- files/sources.list | 9 --------- 1 file changed, 9 deletions(-) delete mode 100644 files/sources.list diff --git a/files/sources.list b/files/sources.list deleted file mode 100644 index 
77a1bfb52e..0000000000 --- a/files/sources.list +++ /dev/null @@ -1,9 +0,0 @@ -deb http://mirror.rackspace.com/ubuntu/ %DIST% main restricted -deb http://mirror.rackspace.com/ubuntu/ %DIST%-updates main restricted -deb http://mirror.rackspace.com/ubuntu/ %DIST% universe -deb http://mirror.rackspace.com/ubuntu/ %DIST%-updates universe -deb http://mirror.rackspace.com/ubuntu/ %DIST% multiverse -deb http://mirror.rackspace.com/ubuntu/ %DIST%-updates multiverse -deb http://security.ubuntu.com/ubuntu %DIST%-security main restricted -deb http://security.ubuntu.com/ubuntu %DIST%-security universe -deb http://security.ubuntu.com/ubuntu %DIST%-security multiverse From 71d5630c4ad2dd74964119a4ad8f16833f61fc21 Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Mon, 22 Jul 2013 11:37:42 +0200 Subject: [PATCH 0037/4438] Adds new tag for testonly packages in package lists Also introduces a new parameter INSTALL_TESTONLY_PACKAGES in the stackrc with False as default value. Setting it to True stack.sh will install all packages tagged with the tag testonly in the package lists. 
Includes needed packages for Ubuntu and Fedora fixes bug #1203680 Change-Id: I911a6601819a34262853bba0658f6751148bfbec --- files/apts/glance | 6 ++++++ files/rpms/glance | 6 ++++++ functions | 26 +++++++++++++++++++++++--- stackrc | 3 +++ 4 files changed, 38 insertions(+), 3 deletions(-) diff --git a/files/apts/glance b/files/apts/glance index a05e9f2ea7..26826a53c7 100644 --- a/files/apts/glance +++ b/files/apts/glance @@ -1,5 +1,10 @@ gcc +libffi-dev # testonly +libmysqlclient-dev # testonly +libpq-dev # testonly +libssl-dev # testonly libxml2-dev +libxslt1-dev # testonly python-dev python-eventlet python-routes @@ -10,3 +15,4 @@ python-wsgiref python-pastedeploy python-xattr python-iso8601 +zlib1g-dev # testonly diff --git a/files/rpms/glance b/files/rpms/glance index 0f113eaa01..dd66171f7a 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -1,5 +1,10 @@ gcc +libffi-devel # testonly libxml2-devel +libxslt-devel # testonly +mysql-devel # testonly +openssl-devel # testonly +postgresql-devel # testonly python-argparse python-devel python-eventlet @@ -9,3 +14,4 @@ python-routes python-sqlalchemy python-wsgiref pyxattr +zlib-devel # testonly diff --git a/functions b/functions index 087a0ea844..5546defa22 100644 --- a/functions +++ b/functions @@ -317,16 +317,36 @@ function get_packages() { continue fi + # Assume we want this package + package=${line%#*} + inst_pkg=1 + + # Look for # dist:xxx in comment if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then # We are using BASH regexp matching feature. package=${BASH_REMATCH[1]} distros=${BASH_REMATCH[2]} # In bash ${VAR,,} will lowecase VAR - [[ ${distros,,} =~ ${DISTRO,,} ]] && echo $package - continue + # Look for a match in the distro list + if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then + # If no match then skip this package + inst_pkg=0 + fi + fi + + # Look for # testonly in comment + if [[ $line =~ (.*)#.*testonly.* ]]; then + package=${BASH_REMATCH[1]} + # Are we installing test packages? 
(test for the default value) + if [[ $INSTALL_TESTONLY_PACKAGES = "False" ]]; then + # If not installing test packages the skip this package + inst_pkg=0 + fi fi - echo ${line%#*} + if [[ $inst_pkg = 1 ]]; then + echo $package + fi done IFS=$OIFS done diff --git a/stackrc b/stackrc index c81906ac8c..8b97536b50 100644 --- a/stackrc +++ b/stackrc @@ -275,6 +275,9 @@ USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN} # Set default screen name SCREEN_NAME=${SCREEN_NAME:-stack} +# Do not install packages tagged with 'testonly' by default +INSTALL_TESTONLY_PACKAGES=${INSTALL_TESTONLY_PACKAGES:-False} + # Local variables: # mode: shell-script # End: From fac533e38db871631cee33d0e3c94884035851b8 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Wed, 14 Aug 2013 16:04:01 +0200 Subject: [PATCH 0038/4438] Configure bash completion Add bash completion rules to the /etc/bash_completion.d from the cinder, neutron, keystone, nova and nova-manage. This is very fast operation and makes the cli usage easier. Change-Id: Icdcdaf55d58efaaa1afe25fd55f088bf7dc8b3f1 --- lib/cinder | 1 + lib/keystone | 1 + lib/neutron | 1 + lib/nova | 2 ++ 4 files changed, 5 insertions(+) diff --git a/lib/cinder b/lib/cinder index 54cf844831..6fc877d279 100644 --- a/lib/cinder +++ b/lib/cinder @@ -468,6 +468,7 @@ function install_cinder() { function install_cinderclient() { git_clone $CINDERCLIENT_REPO $CINDERCLIENT_DIR $CINDERCLIENT_BRANCH setup_develop $CINDERCLIENT_DIR + sudo install -D -m 0644 -o $STACK_USER {$CINDERCLIENT_DIR/tools/,/etc/bash_completion.d/}cinder.bash_completion } # apply config.d approach for cinder volumes directory diff --git a/lib/keystone b/lib/keystone index e7e0544bb4..0a35dd5d80 100644 --- a/lib/keystone +++ b/lib/keystone @@ -289,6 +289,7 @@ function init_keystone() { function install_keystoneclient() { git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH setup_develop $KEYSTONECLIENT_DIR + sudo install -D -m 0644 -o $STACK_USER 
{$KEYSTONECLIENT_DIR/tools/,/etc/bash_completion.d/}keystone.bash_completion } # install_keystone() - Collect source and prepare diff --git a/lib/neutron b/lib/neutron index 31876dee88..c46003b08b 100644 --- a/lib/neutron +++ b/lib/neutron @@ -382,6 +382,7 @@ function install_neutron() { function install_neutronclient() { git_clone $NEUTRONCLIENT_REPO $NEUTRONCLIENT_DIR $NEUTRONCLIENT_BRANCH setup_develop $NEUTRONCLIENT_DIR + sudo install -D -m 0644 -o $STACK_USER {$NEUTRONCLIENT_DIR/tools/,/etc/bash_completion.d/}neutron.bash_completion } # install_neutron_agent_packages() - Collect source and prepare diff --git a/lib/nova b/lib/nova index 842c098624..bb9bca2533 100644 --- a/lib/nova +++ b/lib/nova @@ -645,6 +645,7 @@ function init_nova() { function install_novaclient() { git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH setup_develop $NOVACLIENT_DIR + sudo install -D -m 0644 -o $STACK_USER {$NOVACLIENT_DIR/tools/,/etc/bash_completion.d/}nova.bash_completion } # install_nova() - Collect source and prepare @@ -682,6 +683,7 @@ function install_nova() { git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH setup_develop $NOVA_DIR + sudo install -D -m 0644 -o $STACK_USER {$NOVA_DIR/tools/,/etc/bash_completion.d/}nova-manage.bash_completion } # start_nova_api() - Start the API process ahead of other things From ce696b60d77752f74924fa133c45910e9d0ef706 Mon Sep 17 00:00:00 2001 From: Roman Prykhodchenko Date: Fri, 9 Aug 2013 10:40:45 +0300 Subject: [PATCH 0039/4438] Basic support of Ironic Ironic is an OpenStack project than brings a separate service for baremetal provisioning. Currently Ironic is in incubation but it needs to have basic support in devstack to provide automatic deployment testing. 
Change-Id: Ide65a1379fa207a6c8b2c7d9a4f9c874b10fd9ba --- lib/ironic | 222 +++++++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 21 +++++ stackrc | 4 + unstack.sh | 7 ++ 4 files changed, 254 insertions(+) create mode 100644 lib/ironic diff --git a/lib/ironic b/lib/ironic new file mode 100644 index 0000000000..2ce5038ea4 --- /dev/null +++ b/lib/ironic @@ -0,0 +1,222 @@ +# lib/ironic +# Functions to control the configuration and operation of the **Ironic** service + +# Dependencies: +# ``functions`` file +# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# ``SERVICE_HOST`` +# ``KEYSTONE_TOKEN_FORMAT`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# install_ironic +# configure_ironic +# init_ironic +# start_ironic +# stop_ironic +# cleanup_ironic + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories +IRONIC_DIR=$DEST/ironic +IRONIC_AUTH_CACHE_DIR=${IRONIC_AUTH_CACHE_DIR:-/var/cache/ironic} +IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic} +IRONIC_CONF_FILE=$IRONIC_CONF_DIR/ironic.conf +IRONIC_ROOTWRAP_CONF=$IRONIC_CONF_DIR/rootwrap.conf +IRONIC_ROOTWRAP_FILTERS=$IRONIC_CONF_DIR/rootwrap.d +IRONIC_POLICY_JSON=$IRONIC_CONF_DIR/policy.json + +# Support entry points installation of console scripts +IRONIC_BIN_DIR=$(get_python_exec_prefix) + +# Ironic connection info. Note the port must be specified. +IRONIC_SERVICE_PROTOCOL=http +IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST:6385} + + +# Functions +# --------- + +# cleanup_ironic() - Remove residual data files, anything left over from previous +# runs that would need to clean up. +function cleanup_ironic() { + sudo rm -rf $IRONIC_AUTH_CACHE_DIR +} + +# configure_ironic() - Set config files, create data dirs, etc +function configure_ironic() { + if [[ ! 
-d $IRONIC_CONF_DIR ]]; then + sudo mkdir -p $IRONIC_CONF_DIR + fi + sudo chown $STACK_USER $IRONIC_CONF_DIR + + # Copy over ironic configuration file and configure common parameters. + cp $IRONIC_DIR/etc/ironic/ironic.conf.sample $IRONIC_CONF_FILE + iniset $IRONIC_CONF_FILE DEFAULT debug True + inicomment $IRONIC_CONF_FILE DEFAULT log_file + iniset $IRONIC_CONF_FILE DEFAULT sql_connection `database_connection_url ironic` + iniset $IRONIC_CONF_FILE DEFAULT use_syslog $SYSLOG + + # Configure Ironic conductor, if it was enabled. + if is_service_enabled ir-cond; then + configure_ironic_conductor + fi + + # Configure Ironic API, if it was enabled. + if is_service_enabled ir-api; then + configure_ironic_api + fi +} + +# configure_ironic_api() - Is used by configure_ironic(). Performs +# API specific configuration. +function configure_ironic_api() { + iniset $IRONIC_CONF_FILE keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $IRONIC_CONF_FILE keystone_authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $IRONIC_CONF_FILE keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $IRONIC_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + iniset $IRONIC_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $IRONIC_CONF_FILE keystone_authtoken admin_user ironic + iniset $IRONIC_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD + if is_service_enabled qpid; then + iniset $IRONIC_CONF_FILE DEFAULT notifier_strategy qpid + elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then + iniset $IRONIC_CONF_FILE DEFAULT notifier_strategy rabbit + fi + iniset_rpc_backend ironic $IRONIC_CONF_FILE DEFAULT + iniset $IRONIC_CONF_FILE keystone_authtoken signing_dir $IRONIC_AUTH_CACHE_DIR/api + + cp -p $IRONIC_DIR/etc/ironic/policy.json $IRONIC_POLICY_JSON +} + +# configure_ironic_conductor() - Is used by configure_ironic(). +# Sets conductor specific settings. 
+function configure_ironic_conductor() { + cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF + cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_ROOTWRAP_FILTERS + + iniset $IRONIC_CONF DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF +} + +# create_ironic_cache_dir() - Part of the init_ironic() process +function create_ironic_cache_dir() { + # Create cache dir + sudo mkdir -p $IRONIC_AUTH_CACHE_DIR/api + sudo chown $STACK_USER $IRONIC_AUTH_CACHE_DIR/api + rm -f $IRONIC_AUTH_CACHE_DIR/api/* + sudo mkdir -p $IRONIC_AUTH_CACHE_DIR/registry + sudo chown $STACK_USER $IRONIC_AUTH_CACHE_DIR/registry + rm -f $IRONIC_AUTH_CACHE_DIR/registry/* +} + +# create_ironic_accounts() - Set up common required ironic accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service ironic admin # if enabled +create_ironic_accounts() { + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + # Ironic + if [[ "$ENABLED_SERVICES" =~ "ir-api" ]]; then + IRONIC_USER=$(keystone user-create \ + --name=ironic \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=ironic@example.com \ + | grep " id " | get_field 2) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $IRONIC_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + IRONIC_SERVICE=$(keystone service-create \ + --name=ironic \ + --type=baremetal \ + --description="Ironic baremetal provisioning service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $IRONIC_SERVICE \ + --publicurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT/v1/" \ + --adminurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT/v1/" \ + --internalurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT/v1/" + fi + fi +} + + +# init_ironic() - Initialize databases, etc. 
+function init_ironic() { + # (Re)create ironic database + recreate_database ironic utf8 + + # Migrate ironic database + $IRONIC_BIN_DIR/ironic-dbsync + + create_ironic_cache_dir + + # Create keystone artifacts for Ironic. + create_ironic_accounts +} + +# install_ironic() - Collect source and prepare +function install_ironic() { + git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH + setup_develop $IRONIC_DIR +} + +# start_ironic() - Start running processes, including screen +function start_ironic() { + # Start Ironic API server, if enabled. + if is_service_enabled ir-api; then + start_ironic_api + fi + + # Start Ironic conductor, if enabled. + if is_service_enabled ir-cond; then + start_ironic_conductor + fi +} + +# start_ironic_api() - Used by start_ironic(). +# Starts Ironic API server. +function start_ironic_api() { + screen_it ir-api "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE" + echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then + die $LINENO "ir-api did not start" + fi +} + +# start_ironic_conductor() - Used by start_ironic(). +# Starts Ironic conductor. +function start_ironic_conductor() { + screen_it ir-cond "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-conductor --config-file=$IRONIC_CONF_FILE" + # TODO(romcheg): Find a way to check whether the conductor has started. 
+} + +# stop_ironic() - Stop running processes +function stop_ironic() { + # Kill the Ironic screen windows + screen -S $SCREEN_NAME -p ir-api -X kill + screen -S $SCREEN_NAME -p ir-cond -X kill +} + + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index 8f59328792..5094e25b67 100755 --- a/stack.sh +++ b/stack.sh @@ -318,6 +318,7 @@ source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap +source $TOP_DIR/lib/ironic # Set the destination directories for other OpenStack projects OPENSTACKCLIENT_DIR=$DEST/python-openstackclient @@ -778,6 +779,11 @@ if is_service_enabled tls-proxy; then # don't be naive and add to existing line! fi +if is_service_enabled ir-api ir-cond; then + install_ironic + configure_ironic +fi + if [[ $TRACK_DEPENDS = True ]]; then $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then @@ -946,6 +952,15 @@ if is_service_enabled g-reg; then init_glance fi +# Ironic +# ------ + +if is_service_enabled ir-api ir-cond; then + echo_summary "Configuring Ironic" + init_ironic +fi + + # Neutron # ------- @@ -1186,6 +1201,12 @@ if is_service_enabled g-api g-reg; then start_glance fi +# Launch the Ironic services +if is_service_enabled ir-api ir-cond; then + echo_summary "Starting Ironic" + start_ironic +fi + # Create an access key and secret key for nova ec2 register image if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1) diff --git a/stackrc b/stackrc index c81906ac8c..b3e2e148ce 100644 --- a/stackrc +++ b/stackrc @@ -96,6 +96,10 @@ HEATCLIENT_BRANCH=${HEATCLIENT_BRANCH:-master} HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git} HORIZON_BRANCH=${HORIZON_BRANCH:-master} +# baremetal provisionint service 
+IRONIC_REPO=${IRONIC_REPO:-${GIT_BASE}/openstack/ironic.git} +IRONIC_BRANCH=${IRONIC_BRANCH:-master} + # unified auth system (manages accounts/tokens) KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git} KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master} diff --git a/unstack.sh b/unstack.sh index 2268b90458..84eee4f3c1 100755 --- a/unstack.sh +++ b/unstack.sh @@ -33,6 +33,7 @@ source $TOP_DIR/lib/cinder source $TOP_DIR/lib/horizon source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron +source $TOP_DIR/lib/ironic # Determine what system we are running on. This provides ``os_VENDOR``, # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` @@ -71,6 +72,12 @@ if is_service_enabled s-proxy; then cleanup_swift fi +# Ironic runs daemons +if is_service_enabled ir-api ir-cond; then + stop_ironic + cleanup_ironic +fi + # Apache has the WSGI processes if is_service_enabled horizon; then stop_horizon From 73b21910123704ac64ca3d2ba7f50e90e248d7ea Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Thu, 22 Aug 2013 11:25:21 +0000 Subject: [PATCH 0040/4438] Add support for setting Neutron DHCP agent options. This patch adds support for setting arbitrary Neutron DHCP agent options. An example of using it would be to add this to your localrc: Q_DHCP_EXTRA_DEFAULT_OPTS=(enable_multihost=True) Change-Id: I56d267eafa06c52c3867e3396483f5fde3ee5570 --- lib/neutron | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/neutron b/lib/neutron index 31876dee88..3ab6a4cde9 100644 --- a/lib/neutron +++ b/lib/neutron @@ -541,6 +541,14 @@ function _configure_neutron_dhcp_agent() { iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + # Define extra "DEFAULT" configuration options when q-dhcp is configured by + # defining the array ``Q_DHCP_EXTRA_DEFAULT_OPTS``. 
+ # For Example: ``Q_DHCP_EXTRA_DEFAULT_OPTS=(foo=true bar=2)`` + for I in "${Q_DHCP_EXTRA_DEFAULT_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + iniset $Q_DHCP_CONF_FILE DEFAULT ${I/=/ } + done + _neutron_setup_interface_driver $Q_DHCP_CONF_FILE neutron_plugin_configure_dhcp_agent From 032e45468ecf9f8e1ee6745f03a43e8ec3dd2b59 Mon Sep 17 00:00:00 2001 From: Yong Sheng Gong Date: Sun, 25 Aug 2013 10:21:10 +0800 Subject: [PATCH 0041/4438] change quantum into neutron for neutron configuration values in nova.conf Change quantum into neutron Since nova already supports the new neutron items and values Change-Id: I747eae613c0ec28596ea67da4e98fb3d7f6d93bc Fixes: Bug #1216455 --- lib/neutron | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/lib/neutron b/lib/neutron index 31876dee88..c5874602d2 100644 --- a/lib/neutron +++ b/lib/neutron @@ -250,18 +250,18 @@ function configure_neutron() { } function create_nova_conf_neutron() { - iniset $NOVA_CONF DEFAULT network_api_class "nova.network.quantumv2.api.API" - iniset $NOVA_CONF DEFAULT quantum_admin_username "$Q_ADMIN_USERNAME" - iniset $NOVA_CONF DEFAULT quantum_admin_password "$SERVICE_PASSWORD" - iniset $NOVA_CONF DEFAULT quantum_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" - iniset $NOVA_CONF DEFAULT quantum_auth_strategy "$Q_AUTH_STRATEGY" - iniset $NOVA_CONF DEFAULT quantum_admin_tenant_name "$SERVICE_TENANT_NAME" - iniset $NOVA_CONF DEFAULT quantum_region_name "RegionOne" - iniset $NOVA_CONF DEFAULT quantum_url "http://$Q_HOST:$Q_PORT" + iniset $NOVA_CONF DEFAULT network_api_class "nova.network.neutronv2.api.API" + iniset $NOVA_CONF DEFAULT neutron_admin_username "$Q_ADMIN_USERNAME" + iniset $NOVA_CONF DEFAULT neutron_admin_password "$SERVICE_PASSWORD" + iniset $NOVA_CONF DEFAULT neutron_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" + iniset $NOVA_CONF DEFAULT 
neutron_auth_strategy "$Q_AUTH_STRATEGY" + iniset $NOVA_CONF DEFAULT neutron_admin_tenant_name "$SERVICE_TENANT_NAME" + iniset $NOVA_CONF DEFAULT neutron_region_name "RegionOne" + iniset $NOVA_CONF DEFAULT neutron_url "http://$Q_HOST:$Q_PORT" if [[ "$Q_USE_SECGROUP" == "True" ]]; then LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver - iniset $NOVA_CONF DEFAULT security_group_api quantum + iniset $NOVA_CONF DEFAULT security_group_api neutron fi # set NOVA_VIF_DRIVER and optionally set options in nova_conf @@ -270,7 +270,7 @@ function create_nova_conf_neutron() { iniset $NOVA_CONF DEFAULT libvirt_vif_driver "$NOVA_VIF_DRIVER" iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER" if is_service_enabled q-meta; then - iniset $NOVA_CONF DEFAULT service_quantum_metadata_proxy "True" + iniset $NOVA_CONF DEFAULT service_neutron_metadata_proxy "True" fi } From 8535d8b3fc283ac4ebb7a851b19bf2bff36d78d0 Mon Sep 17 00:00:00 2001 From: Yong Sheng Gong Date: Sun, 25 Aug 2013 11:21:13 +0800 Subject: [PATCH 0042/4438] use keystone service port instead of admin port Change-Id: Iaf1848ecabf100171f741fde0efee5d8f65b7795 Fixes: Bug #1214921 --- lib/neutron | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/lib/neutron b/lib/neutron index 31876dee88..1084e65785 100644 --- a/lib/neutron +++ b/lib/neutron @@ -577,7 +577,7 @@ function _configure_neutron_metadata_agent() { iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - _neutron_setup_keystone $Q_META_CONF_FILE DEFAULT True True + _neutron_setup_keystone $Q_META_CONF_FILE DEFAULT True True True } @@ -690,11 +690,16 @@ function _neutron_setup_keystone() { local section=$2 local use_auth_url=$3 local skip_auth_cache=$4 + local use_service_port=$5 + local keystone_port=$KEYSTONE_AUTH_PORT + if [[ -n $use_service_port ]]; then + keystone_port=$KEYSTONE_SERVICE_PORT + fi if [[ -n $use_auth_url ]]; then - 
iniset $conf_file $section auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0" + iniset $conf_file $section auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$keystone_port/v2.0" else iniset $conf_file $section auth_host $KEYSTONE_SERVICE_HOST - iniset $conf_file $section auth_port $KEYSTONE_AUTH_PORT + iniset $conf_file $section auth_port $keystone_port iniset $conf_file $section auth_protocol $KEYSTONE_SERVICE_PROTOCOL fi iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME From f645a8504a2b0b824cfa6693a49e5032d0b9d1ee Mon Sep 17 00:00:00 2001 From: Angus Salkeld Date: Mon, 26 Aug 2013 10:13:36 +1000 Subject: [PATCH 0043/4438] Add support for heat enviroments heat now has global environments that make it easy to rename and customise resource behaviour. These are yaml files that need to be in /etc/heat/environment.d/ Change-Id: I5a08c6ce8f5d7222f79aab2be0903ba783c10aa1 --- lib/heat | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index fb4002b7e6..5d6c6aa29b 100644 --- a/lib/heat +++ b/lib/heat @@ -31,6 +31,8 @@ HEAT_DIR=$DEST/heat HEATCLIENT_DIR=$DEST/python-heatclient HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat} HEAT_STANDALONE=`trueorfalse False $HEAT_STANDALONE` +HEAT_CONF_DIR=/etc/heat +HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d # Functions # --------- @@ -39,13 +41,13 @@ HEAT_STANDALONE=`trueorfalse False $HEAT_STANDALONE` # runs that a clean run would need to clean up function cleanup_heat() { sudo rm -rf $HEAT_AUTH_CACHE_DIR + sudo rm -rf $HEAT_ENV_DIR } # configure_heat() - Set config files, create data dirs, etc function configure_heat() { setup_develop $HEAT_DIR - HEAT_CONF_DIR=/etc/heat if [[ ! 
-d $HEAT_CONF_DIR ]]; then sudo mkdir -p $HEAT_CONF_DIR fi @@ -155,6 +157,12 @@ function configure_heat() { iniset_rpc_backend heat $HEAT_API_CW_CONF DEFAULT + # heat environment + sudo mkdir -p $HEAT_ENV_DIR + sudo chown $STACK_USER $HEAT_ENV_DIR + # copy the default environment + cp $HEAT_DIR/etc/heat/environment.d/* $HEAT_ENV_DIR/ + } # init_heat() - Initialize database From fb71a272db2bc447f2ee7c842f8b245d497b4217 Mon Sep 17 00:00:00 2001 From: Angus Salkeld Date: Mon, 26 Aug 2013 10:15:38 +1000 Subject: [PATCH 0044/4438] Add support for heat resource templates These are resources that are defined as a template and can be customised by a deployer. Change-Id: Ia739a36d627b7cfec26641b89513355e65cd1d1f --- lib/heat | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/heat b/lib/heat index 5d6c6aa29b..8b6fd7fc4b 100644 --- a/lib/heat +++ b/lib/heat @@ -33,6 +33,7 @@ HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat} HEAT_STANDALONE=`trueorfalse False $HEAT_STANDALONE` HEAT_CONF_DIR=/etc/heat HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d +HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates # Functions # --------- @@ -42,6 +43,7 @@ HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d function cleanup_heat() { sudo rm -rf $HEAT_AUTH_CACHE_DIR sudo rm -rf $HEAT_ENV_DIR + sudo rm -rf $HEAT_TEMPLATES_DIR } # configure_heat() - Set config files, create data dirs, etc @@ -163,6 +165,12 @@ function configure_heat() { # copy the default environment cp $HEAT_DIR/etc/heat/environment.d/* $HEAT_ENV_DIR/ + # heat template resources. 
+ sudo mkdir -p $HEAT_TEMPLATES_DIR + sudo chown $STACK_USER $HEAT_TEMPLATES_DIR + # copy the default templates + cp $HEAT_DIR/etc/heat/templates/* $HEAT_TEMPLATES_DIR/ + } # init_heat() - Initialize database From dc4f2342dba3bb37c42f1c0782eb2cb82d3a63a3 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Mon, 19 Aug 2013 23:46:17 -0700 Subject: [PATCH 0045/4438] VMware: update cinder support for VMware configuration settings The review https://review.openstack.org/#/c/41600 was update to have a 'vmware' prefix for all of the VMware cinder settings. These were previously in a 'vmware' section and now they are in the 'DEFAULT' section. Change-Id: I8eadfb0f64914d3b0667760aff651415df48f627 --- lib/cinder | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/lib/cinder b/lib/cinder index 54cf844831..ec5a3563b6 100644 --- a/lib/cinder +++ b/lib/cinder @@ -289,12 +289,11 @@ function configure_cinder() { fi elif [ "$CINDER_DRIVER" == "vsphere" ]; then echo_summary "Using VMware vCenter driver" - iniset $CINDER_CONF DEFAULT enabled_backends vmware - iniset $CINDER_CONF vmware host_ip "$VMWAREAPI_IP" - iniset $CINDER_CONF vmware host_username "$VMWAREAPI_USER" - iniset $CINDER_CONF vmware host_password "$VMWAREAPI_PASSWORD" - iniset $CINDER_CONF vmware cluster_name "$VMWAREAPI_CLUSTER" - iniset $CINDER_CONF vmware volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver" + iniset $CINDER_CONF DEFAULT vmware_host_ip "$VMWAREAPI_IP" + iniset $CINDER_CONF DEFAULT vmware_host_username "$VMWAREAPI_USER" + iniset $CINDER_CONF DEFAULT vmware_host_password "$VMWAREAPI_PASSWORD" + iniset $CINDER_CONF DEFAULT vmware_cluster_name "$VMWAREAPI_CLUSTER" + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver" fi if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then From bb8c6d42a4628f2a696babcc960e293786f67af6 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Mon, 26 Aug 2013 17:00:05 -0400 Subject: [PATCH 0046/4438] add 
TEMPEST_LARGE_OPS_NUMBER to option The tempest scenario.large_ops test tries to catch any performance issues when running large numbers of operations at once, in this case launching instances. Set to 0 by default, to maintain the current default, but add this as an option so this can be set via devstacks localrc. Part of regression test for bug 1199433 Change-Id: I459717b849f9b6d180a7956c0ff470cefe7c1ff6 --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index 0d4f370c87..50289b60d4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -282,6 +282,9 @@ function configure_tempest() { # Scenario iniset $TEMPEST_CONF scenario img_dir "$FILES/images/cirros-0.3.1-x86_64-uec" + # Large Ops Number + iniset $TEMPEST_CONF scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} + # Volume CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) if [ $CINDER_MULTI_LVM_BACKEND == "True" ]; then From bc2ef929ed4529197b0418fc8234aaca56f84109 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Thu, 15 Aug 2013 18:06:59 +0100 Subject: [PATCH 0047/4438] xenapi: devstack support for raw tgz image upload Devstack will recognise the .xen-raw.tgz extensions, and upload them to glance as raw tgz images with xen pv_mode. This change also adds "tgz" to the recognised container formats of glance. 
The changes for raw tgz support are: https://review.openstack.org/#/c/40908/ https://review.openstack.org/#/c/40909/ https://review.openstack.org/#/c/41651/ related to blueprint xenapi-supported-image-import-export Change-Id: I077564587d4303291bb4f10d62bb16380b574106 --- functions | 18 +++++++++++++++++- lib/glance | 4 ++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 41f008ed84..5ae2ee5cd3 100644 --- a/functions +++ b/functions @@ -1240,7 +1240,7 @@ function upload_image() { return fi - # XenServer-ovf-format images are provided as .vhd.tgz as well + # XenServer-vhd-ovf-format images are provided as .vhd.tgz # and should not be decompressed prior to loading if [[ "$image_url" =~ '.vhd.tgz' ]]; then IMAGE="$FILES/${IMAGE_FNAME}" @@ -1249,6 +1249,22 @@ function upload_image() { return fi + # .xen-raw.tgz suggests a Xen capable raw image inside a tgz. + # and should not be decompressed prior to loading. + # Setting metadata, so PV mode is used. + if [[ "$image_url" =~ '.xen-raw.tgz' ]]; then + IMAGE="$FILES/${IMAGE_FNAME}" + IMAGE_NAME="${IMAGE_FNAME%.xen-raw.tgz}" + glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name "$IMAGE_NAME" --is-public=True \ + --container-format=tgz --disk-format=raw \ + --property vm_mode=xen < "${IMAGE}" + return + fi + KERNEL="" RAMDISK="" DISK_FORMAT="" diff --git a/lib/glance b/lib/glance index a18189f474..64d8b0695a 100644 --- a/lib/glance +++ b/lib/glance @@ -108,6 +108,10 @@ function configure_glance() { fi iniset_rpc_backend glance $GLANCE_API_CONF DEFAULT iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api + if [ "$VIRT_DRIVER" = 'xenserver' ]; then + iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz" + iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,raw,iso" + fi # Store the images in swift if enabled. 
if is_service_enabled s-proxy; then From 04762cd823302ca9992b67419e55ad5fc4dbf8fe Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 27 Aug 2013 17:06:14 -0500 Subject: [PATCH 0048/4438] Fix is_package_installed() check with dpkg is_package_installed() incorrectly returned '0' for packages that had 'un' status in the dpkg database. Change-Id: I81b77486c2ed7717ed81cb2c2572fe6c4b394ffc --- functions | 28 ++++++++++++++++++++++++---- tests/functions.sh | 22 ++++++++++++++++++++++ 2 files changed, 46 insertions(+), 4 deletions(-) diff --git a/functions b/functions index 41f008ed84..b8df520854 100644 --- a/functions +++ b/functions @@ -317,16 +317,36 @@ function get_packages() { continue fi + # Assume we want this package + package=${line%#*} + inst_pkg=1 + + # Look for # dist:xxx in comment if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then # We are using BASH regexp matching feature. package=${BASH_REMATCH[1]} distros=${BASH_REMATCH[2]} # In bash ${VAR,,} will lowecase VAR - [[ ${distros,,} =~ ${DISTRO,,} ]] && echo $package - continue + # Look for a match in the distro list + if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then + # If no match then skip this package + inst_pkg=0 + fi + fi + + # Look for # testonly in comment + if [[ $line =~ (.*)#.*testonly.* ]]; then + package=${BASH_REMATCH[1]} + # Are we installing test packages? 
(test for the default value) + if [[ $INSTALL_TESTONLY_PACKAGES = "False" ]]; then + # If not installing test packages the skip this package + inst_pkg=0 + fi fi - echo ${line%#*} + if [[ $inst_pkg = 1 ]]; then + echo $package + fi done IFS=$OIFS done @@ -912,7 +932,7 @@ function is_package_installed() { fi if [[ "$os_PACKAGE" = "deb" ]]; then - dpkg -l "$@" > /dev/null 2> /dev/null + dpkg -s "$@" > /dev/null 2> /dev/null elif [[ "$os_PACKAGE" = "rpm" ]]; then rpm --quiet -q "$@" else diff --git a/tests/functions.sh b/tests/functions.sh index 27a6cfeec4..7d486d4cc5 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -367,3 +367,25 @@ if [[ "$VAL" -ne 0 ]]; then else echo "is_package_installed() on non-existing package failed" fi + +# test against removed package...was a bug on Ubuntu +if is_ubuntu; then + PKG=cowsay + if ! (dpkg -s $PKG >/dev/null 2>&1); then + # it was never installed...set up the condition + sudo apt-get install -y cowsay >/dev/null 2>&1 + fi + if (dpkg -s $PKG >/dev/null 2>&1); then + # remove it to create the 'un' status + sudo dpkg -P $PKG >/dev/null 2>&1 + fi + + # now test the installed check on a deleted package + is_package_installed $PKG + VAL=$? + if [[ "$VAL" -ne 0 ]]; then + echo "OK" + else + echo "is_package_installed() on deleted package failed" + fi +fi From 300e1bf276b16c7aeab1631f709048346db63bd8 Mon Sep 17 00:00:00 2001 From: Yong Sheng Gong Date: Wed, 28 Aug 2013 17:02:56 +0800 Subject: [PATCH 0049/4438] Use the varialbe to export right keystone api version Change-Id: I1e8ea2b7173c549065ed1f08814eb4b4bb2f05cd Fixes: Bug #1217783 --- openrc | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/openrc b/openrc index a23c6e95bd..3de7e3958f 100644 --- a/openrc +++ b/openrc @@ -63,21 +63,19 @@ SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} # should be listening on HOST_IP. 
If its running elsewhere, it can be set here GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} +# Identity API version +export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} + # Authenticating against an Openstack cloud using Keystone returns a **Token** # and **Service Catalog**. The catalog contains the endpoints for all services # the user/tenant has access to - including nova, glance, keystone, swift, ... # We currently recommend using the 2.0 *identity api*. # -# *NOTE*: Using the 2.0 *identity api* does not mean that compute api is 2.0. We -# will use the 1.1 *compute api* -export OS_AUTH_URL=$SERVICE_PROTOCOL://$SERVICE_HOST:5000/v2.0 +export OS_AUTH_URL=$SERVICE_PROTOCOL://$SERVICE_HOST:5000/v${OS_IDENTITY_API_VERSION} # Set the pointer to our CA certificate chain. Harmless if TLS is not used. export OS_CACERT=$INT_CA_DIR/ca-chain.pem -# Identity API version -export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} - # Currently novaclient needs you to specify the *compute api* version. This # needs to match the config of your catalog returned by Keystone. export NOVA_VERSION=${NOVA_VERSION:-1.1} From 2aa2a89cdb9071cea919116e283c16ac9dd841d6 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Sun, 4 Aug 2013 19:53:19 -0500 Subject: [PATCH 0050/4438] Add support for Docker as Nova hypervisor * Add basic support for hypervisor plugins in lib/nova_plugins * Add lib/nova_plugins/hypervisor-docker to use Docker as a Nova hypervisor. 
* Add tools/install_docker.sh to install the Docker daemon and registry container, download base image and import * Configure Nova to use docker plugin * Add docker exercise and skip unsupported ones Nova blueprint: new-hypervisor-docker Change-Id: I9e7065b562dce2ce853def583ab1165886612227 --- README.md | 6 +- clean.sh | 5 ++ exercises/boot_from_volume.sh | 3 + exercises/docker.sh | 105 +++++++++++++++++++++++ exercises/euca.sh | 3 + exercises/floating_ips.sh | 3 + exercises/sec_groups.sh | 3 + exercises/volumes.sh | 3 + lib/nova | 17 +++- lib/nova_plugins/hypervisor-docker | 132 +++++++++++++++++++++++++++++ stack.sh | 18 +++- tools/docker/README.md | 13 +++ tools/docker/install_docker.sh | 75 ++++++++++++++++ unstack.sh | 8 ++ 14 files changed, 391 insertions(+), 3 deletions(-) create mode 100755 exercises/docker.sh create mode 100644 lib/nova_plugins/hypervisor-docker create mode 100644 tools/docker/README.md create mode 100755 tools/docker/install_docker.sh diff --git a/README.md b/README.md index 6426e9a4ed..99e983887e 100644 --- a/README.md +++ b/README.md @@ -12,10 +12,14 @@ Read more at http://devstack.org (built from the gh-pages branch) IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you execute before you run them, as they install software and may alter your networking configuration. We strongly recommend that you run `stack.sh` in a clean and disposable vm when you are first getting started. -# Devstack on Xenserver +# DevStack on Xenserver If you would like to use Xenserver as the hypervisor, please refer to the instructions in `./tools/xen/README.md`. +# DevStack on Docker + +If you would like to use Docker as the hypervisor, please refer to the instructions in `./tools/docker/README.md`. + # Versions The devstack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release] in the DevStack repo. 
For example, you can do the following to create a diablo OpenStack cloud: diff --git a/clean.sh b/clean.sh index f7d15dfe4e..a443ac82d0 100755 --- a/clean.sh +++ b/clean.sh @@ -64,6 +64,11 @@ cleanup_nova cleanup_neutron cleanup_swift +# Do the hypervisor cleanup until this can be moved back into lib/nova +if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + cleanup_nova_hypervisor +fi + # cinder doesn't always clean up the volume group as it might be used elsewhere... # clean it up if it is a loop device VG_DEV=$(sudo losetup -j $DATA_DIR/${VOLUME_GROUP}-backing-file | awk -F':' '/backing-file/ { print $1}') diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 36524ede4b..fe27bd0956 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -44,6 +44,9 @@ source $TOP_DIR/exerciserc # the exercise is skipped is_service_enabled cinder || exit 55 +# Also skip if the hypervisor is Docker +[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 + # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/exercises/docker.sh b/exercises/docker.sh new file mode 100755 index 0000000000..0672bc0087 --- /dev/null +++ b/exercises/docker.sh @@ -0,0 +1,105 @@ +#!/usr/bin/env bash + +# **docker** + +# Test Docker hypervisor + +echo "*********************************************************************" +echo "Begin DevStack Exercise: $0" +echo "*********************************************************************" + +# This script exits on an error so that errors don't compound and you see +# only the first error that occurred. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. 
+set -o xtrace + + +# Settings +# ======== + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import exercise configuration +source $TOP_DIR/exerciserc + +# Skip if the hypervisor is not Docker +[[ "$VIRT_DRIVER" == "docker" ]] || exit 55 + +# Import docker functions and declarations +source $TOP_DIR/lib/nova_plugins/hypervisor-docker + +# Image and flavor are ignored but the CLI requires them... + +# Instance type to create +DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} + +# Boot this image, use first AMI image if unset +DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} + +# Instance name +VM_NAME=ex-docker + + +# Launching a server +# ================== + +# Grab the id of the image to launch +IMAGE=$(glance image-list | egrep " $DOCKER_IMAGE_NAME:latest " | get_field 1) +die_if_not_set $LINENO IMAGE "Failure getting image $DOCKER_IMAGE_NAME" + +# Select a flavor +INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) +if [[ -z "$INSTANCE_TYPE" ]]; then + # grab the first flavor in the list to launch if default doesn't exist + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) +fi + +# Clean-up from previous runs +nova delete $VM_NAME || true +if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then + die $LINENO "server didn't terminate!" +fi + +# Boot instance +# ------------- + +VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE $VM_NAME | grep ' id ' | get_field 2) +die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" + +# Check that the status is active within ACTIVE_TIMEOUT seconds +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then + die $LINENO "server didn't become active!" 
+fi + +# Get the instance IP +IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) +die_if_not_set $LINENO IP "Failure retrieving IP address" + +# Private IPs can be pinged in single node deployments +ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT + +# Clean up +# -------- + +# Delete instance +nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME" +if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then + die $LINENO "Server $VM_NAME not deleted" +fi + +set +o xtrace +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" + diff --git a/exercises/euca.sh b/exercises/euca.sh index b8b283a8fb..64c0014236 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -41,6 +41,9 @@ fi # Import exercise configuration source $TOP_DIR/exerciserc +# Skip if the hypervisor is Docker +[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 + # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index f93a727df6..2833b650ba 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -38,6 +38,9 @@ fi # Import exercise configuration source $TOP_DIR/exerciserc +# Skip if the hypervisor is Docker +[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 + # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh index 6b67291cde..7d80570326 100755 --- a/exercises/sec_groups.sh +++ b/exercises/sec_groups.sh @@ -33,6 +33,9 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc +# Skip if the hypervisor is Docker +[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 + # Testing Security Groups # ======================= diff --git a/exercises/volumes.sh 
b/exercises/volumes.sh index 028d19b36a..e536d16249 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -42,6 +42,9 @@ source $TOP_DIR/exerciserc # exercise is skipped. is_service_enabled cinder || exit 55 +# Also skip if the hypervisor is Docker +[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 + # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/lib/nova b/lib/nova index 842c098624..3486aa8fb9 100644 --- a/lib/nova +++ b/lib/nova @@ -169,6 +169,13 @@ function cleanup_nova() { fi sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR + + # NOTE(dtroyer): This really should be called from here but due to the way + # nova abuses the _cleanup() function we're moving it + # directly into cleanup.sh until this can be fixed. + #if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + # cleanup_nova_hypervisor + #fi } # configure_nova_rootwrap() - configure Nova's rootwrap @@ -650,7 +657,9 @@ function install_novaclient() { # install_nova() - Collect source and prepare function install_nova() { if is_service_enabled n-cpu; then - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + install_nova_hypervisor + elif [[ "$VIRT_DRIVER" = 'libvirt' ]]; then if is_ubuntu; then install_package kvm install_package libvirt-bin @@ -728,6 +737,9 @@ function start_nova() { screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM" done else + if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + start_nova_hypervisor + fi screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM" fi screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" @@ -754,6 +766,9 @@ function stop_nova() { for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cond n-cell n-cell n-api-meta; do screen -S $SCREEN_NAME -p $serv -X kill done + if is_service_enabled 
n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + stop_nova_hypervisor + fi } diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker new file mode 100644 index 0000000000..4c8fc279b0 --- /dev/null +++ b/lib/nova_plugins/hypervisor-docker @@ -0,0 +1,132 @@ +# lib/nova_plugins/docker +# Configure the Docker hypervisor + +# Enable with: +# VIRT_DRIVER=docker + +# Dependencies: +# ``functions`` file +# ``nova`` and ``glance`` configurations + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories +DOCKER_DIR=$DEST/docker +DOCKER_REPO=${DOCKER_REPO:-https://github.com/dotcloud/openstack-docker.git} +DOCKER_BRANCH=${DOCKER_BRANCH:-master} + +DOCKER_UNIX_SOCKET=/var/run/docker.sock +DOCKER_PID_FILE=/var/run/docker.pid +DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042} + +DOCKER_IMAGE=${DOCKER_IMAGE:-http://get.docker.io/images/openstack/docker-ut.tar.gz} +DOCKER_IMAGE_NAME=docker-busybox +DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-http://get.docker.io/images/openstack/docker-registry.tar.gz} +DOCKER_REGISTRY_IMAGE_NAME=docker-registry +DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME} + +DOCKER_PACKAGE_VERSION=${DOCKER_PACKAGE_VERSION:-0.6.1} +DOCKER_APT_REPO=${DOCKER_APT_REPO:-https://get.docker.io/ubuntu} + + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor() { + stop_service docker + + # Clean out work area + sudo rm -rf /var/lib/docker +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function 
configure_nova_hypervisor() { + git_clone $DOCKER_REPO $DOCKER_DIR $DOCKER_BRANCH + + ln -snf ${DOCKER_DIR}/nova-driver $NOVA_DIR/nova/virt/docker + + iniset $NOVA_CONF DEFAULT compute_driver docker.DockerDriver + iniset $GLANCE_API_CONF DEFAULT container_formats ami,ari,aki,bare,ovf,docker + + sudo cp -p ${DOCKER_DIR}/nova-driver/docker.filters $NOVA_CONF_DIR/rootwrap.d +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor() { + # So far this is Ubuntu only + if ! is_ubuntu; then + die $LINENO "Docker is only supported on Ubuntu at this time" + fi + + # Make sure Docker is installed + if ! is_package_installed lxc-docker; then + die $LINENO "Docker is not installed. Please run tools/docker/install_docker.sh" + fi + + local docker_pid + read docker_pid <$DOCKER_PID_FILE + if [[ -z $docker_pid ]] || ! ps -p $docker_pid | grep [d]ocker; then + die $LINENO "Docker not running" + fi +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor() { + local docker_pid + read docker_pid <$DOCKER_PID_FILE + if [[ -z $docker_pid ]] || ! ps -p $docker_pid | grep [d]ocker; then + die $LINENO "Docker not running, start the daemon" + fi + + # Start the Docker registry container + docker run -d -p ${DOCKER_REGISTRY_PORT}:5000 \ + -e SETTINGS_FLAVOR=openstack -e OS_USERNAME=${OS_USERNAME} \ + -e OS_PASSWORD=${OS_PASSWORD} -e OS_TENANT_NAME=${OS_TENANT_NAME} \ + -e OS_GLANCE_URL="${SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" \ + -e OS_AUTH_URL=${OS_AUTH_URL} \ + $DOCKER_REGISTRY_IMAGE_NAME ./docker-registry/run.sh + + echo "Waiting for docker registry to start..." + DOCKER_REGISTRY=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT} + if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl -s $DOCKER_REGISTRY; do sleep 1; done"; then + die $LINENO "docker-registry did not start" + fi + + # Tag image if not already tagged + if ! 
docker images | grep $DOCKER_REPOSITORY_NAME; then + docker tag $DOCKER_IMAGE_NAME $DOCKER_REPOSITORY_NAME + fi + + # Make sure we copied the image in Glance + DOCKER_IMAGE=$(glance image-list | egrep " $DOCKER_IMAGE_NAME ") + if ! is_set DOCKER_IMAGE ; then + docker push $DOCKER_REPOSITORY_NAME + fi +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor() { + # Stop the docker registry container + docker kill $(docker ps | grep docker-registry | cut -d' ' -f1) +} + + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index 8f59328792..c3f69adcf1 100755 --- a/stack.sh +++ b/stack.sh @@ -319,6 +319,13 @@ source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap +# Look for Nova hypervisor plugin +NOVA_PLUGINS=$TOP_DIR/lib/nova_plugins +if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + # Load plugin + source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER +fi + # Set the destination directories for other OpenStack projects OPENSTACKCLIENT_DIR=$DEST/python-openstackclient @@ -1013,6 +1020,10 @@ if is_service_enabled cinder; then init_cinder fi + +# Compute Service +# --------------- + if is_service_enabled nova; then echo_summary "Configuring Nova" # Rebuild the config file from scratch @@ -1027,10 +1038,15 @@ if is_service_enabled nova; then fi + if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + # Configure hypervisor plugin + configure_nova_hypervisor + + # XenServer # --------- - if [ "$VIRT_DRIVER" = 'xenserver' ]; then + elif [ "$VIRT_DRIVER" = 'xenserver' ]; then echo_summary "Using XenServer virtualization driver" if [ -z "$XENAPI_CONNECTION_URL" ]; then die $LINENO "XENAPI_CONNECTION_URL is not specified" diff --git a/tools/docker/README.md b/tools/docker/README.md new file mode 100644 index 0000000000..976111f750 --- /dev/null +++ b/tools/docker/README.md @@ -0,0 +1,13 @@ +# DevStack on Docker + +Using 
Docker as Nova's hypervisor requries two steps: + +* Configure DevStack by adding the following to `localrc`:: + + VIRT_DRIVER=docker + +* Download and install the Docker service and images:: + + tools/docker/install_docker.sh + +After this, `stack.sh` should run as normal. diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh new file mode 100755 index 0000000000..d659ad104b --- /dev/null +++ b/tools/docker/install_docker.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash + +# **install_docker.sh** - Do the initial Docker installation and configuration + +# install_docker.sh +# +# Install docker package and images +# * downloads a base busybox image and a glance registry image if necessary +# * install the images in Docker's image cache + + +# Keep track of the current directory +SCRIPT_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $SCRIPT_DIR/../..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Load local configuration +source $TOP_DIR/stackrc + +FILES=$TOP_DIR/files + +# Get our defaults +source $TOP_DIR/lib/nova_plugins/hypervisor-docker + +SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} + + +# Install Docker Service +# ====================== + +# Stop the auto-repo updates and do it when required here +NO_UPDATE_REPOS=True + +# Set up home repo +curl https://get.docker.io/gpg | sudo apt-key add - +install_package python-software-properties && \ + sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list" +apt_get update +install_package --force-yes lxc-docker=${DOCKER_PACKAGE_VERSION} + +# Start the daemon - restart just in case the package ever auto-starts... +restart_service docker + +echo "Waiting for docker daemon to start..." +DOCKER_GROUP=$(groups | cut -d' ' -f1) +CONFIGURE_CMD="while ! 
/bin/echo -e 'GET /v1.3/version HTTP/1.0\n\n' | socat - unix-connect:$DOCKER_UNIX_SOCKET | grep -q '200 OK'; do + # Set the right group on docker unix socket before retrying + sudo chgrp $DOCKER_GROUP $DOCKER_UNIX_SOCKET + sudo chmod g+rw $DOCKER_UNIX_SOCKET + sleep 1 +done" +if ! timeout $SERVICE_TIMEOUT sh -c "$CONFIGURE_CMD"; then + die $LINENO "docker did not start" +fi + + +# Get Docker image +if [[ ! -r $FILES/docker-ut.tar.gz ]]; then + (cd $FILES; curl -OR $DOCKER_IMAGE) +fi +if [[ ! -r $FILES/docker-ut.tar.gz ]]; then + die $LINENO "Docker image unavailable" +fi +docker import - $DOCKER_IMAGE_NAME <$FILES/docker-ut.tar.gz + +# Get Docker registry image +if [[ ! -r $FILES/docker-registry.tar.gz ]]; then + (cd $FILES; curl -OR $DOCKER_REGISTRY_IMAGE) +fi +if [[ ! -r $FILES/docker-registry.tar.gz ]]; then + die $LINENO "Docker registry image unavailable" +fi +docker import - $DOCKER_REGISTRY_IMAGE_NAME <$FILES/docker-registry.tar.gz diff --git a/unstack.sh b/unstack.sh index 2268b90458..f053bcddd8 100755 --- a/unstack.sh +++ b/unstack.sh @@ -65,6 +65,14 @@ if [[ -n "$SCREEN" ]]; then fi fi +# Shut down Nova hypervisor plugins after Nova +NOVA_PLUGINS=$TOP_DIR/lib/nova_plugins +if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + # Load plugin + source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER + stop_nova_hypervisor +fi + # Swift runs daemons if is_service_enabled s-proxy; then stop_swift From b1dc9bd5e43568e0fc96b4e2be4520be12a1d955 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Thu, 29 Aug 2013 11:52:20 +0100 Subject: [PATCH 0051/4438] xenapi: enable block device access for stack user Although nova is setting the permissions on block devices, sometimes it fails, and that results in an instance failing to launch. It is only an issue for 3-part images, and images accessed through block devices. This patch adds an udev rule, so that devices will be accessible. 
fixes bug 1218251 Change-Id: I837ea515457fbfc50e9ce138ea9de9db12baa8be --- tools/xen/prepare_guest.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index f109d723d9..6ec5ffa546 100755 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -48,6 +48,11 @@ useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd echo $STACK_USER:$GUEST_PASSWORD | chpasswd echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +# Add an udev rule, so that new block devices could be written by stack user +cat > /etc/udev/rules.d/50-openstack-blockdev.rules << EOF +KERNEL=="xvd[b-z]", GROUP="$STACK_USER", MODE="0660" +EOF + # Give ownership of /opt/stack to stack user chown -R $STACK_USER /opt/stack From a213e2c3cafe0739c60766b451d0d44755e87ced Mon Sep 17 00:00:00 2001 From: Angus Salkeld Date: Fri, 30 Aug 2013 10:48:46 +1000 Subject: [PATCH 0052/4438] Move Heat to a single heat.conf the old config files that are no longer needed (but still supported): heat-engine.conf, heat-api.conf, heat-api-cfn.conf, heat-api-cw.conf Change-Id: I7ba0566325539bf7215bcb606843a90b5e3e4a98 --- lib/heat | 138 ++++++++++++++++++++----------------------------------- 1 file changed, 50 insertions(+), 88 deletions(-) diff --git a/lib/heat b/lib/heat index 8b6fd7fc4b..67509bcfa0 100644 --- a/lib/heat +++ b/lib/heat @@ -32,6 +32,7 @@ HEATCLIENT_DIR=$DEST/python-heatclient HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat} HEAT_STANDALONE=`trueorfalse False $HEAT_STANDALONE` HEAT_CONF_DIR=/etc/heat +HEAT_CONF=$HEAT_CONF_DIR/heat.conf HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates @@ -54,6 +55,8 @@ function configure_heat() { sudo mkdir -p $HEAT_CONF_DIR fi sudo chown $STACK_USER $HEAT_CONF_DIR + # remove old config files + rm -f $HEAT_CONF_DIR/heat-*.conf HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$SERVICE_HOST} HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000} @@ -68,96 +71,55 @@ function 
configure_heat() { cp $HEAT_DIR/etc/heat/api-paste.ini $HEAT_API_PASTE_FILE cp $HEAT_DIR/etc/heat/policy.json $HEAT_POLICY_FILE + cp $HEAT_DIR/etc/heat/heat.conf.sample $HEAT_CONF + + # common options + iniset_rpc_backend heat $HEAT_CONF DEFAULT + iniset $HEAT_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT + iniset $HEAT_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition + iniset $HEAT_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT + iniset $HEAT_CONF DEFAULT sql_connection `database_connection_url heat` + iniset $HEAT_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/random` + + # logging + iniset $HEAT_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $HEAT_CONF DEFAULT use_syslog $SYSLOG + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + # Add color to logging output + iniset $HEAT_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $HEAT_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $HEAT_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" + iniset $HEAT_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + fi - # Cloudformation API - HEAT_API_CFN_CONF=$HEAT_CONF_DIR/heat-api-cfn.conf - cp $HEAT_DIR/etc/heat/heat-api-cfn.conf $HEAT_API_CFN_CONF - iniset $HEAT_API_CFN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - inicomment $HEAT_API_CFN_CONF DEFAULT log_file - iniset $HEAT_API_CFN_CONF DEFAULT use_syslog $SYSLOG - iniset $HEAT_API_CFN_CONF DEFAULT bind_host $HEAT_API_CFN_HOST - iniset $HEAT_API_CFN_CONF DEFAULT bind_port $HEAT_API_CFN_PORT - iniset 
$HEAT_API_CFN_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $HEAT_API_CFN_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $HEAT_API_CFN_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $HEAT_API_CFN_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_CFN_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $HEAT_API_CFN_CONF keystone_authtoken admin_user heat - iniset $HEAT_API_CFN_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $HEAT_API_CFN_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cfn - iniset $HEAT_API_CFN_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_CFN_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens - [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_API_CFN_CONF paste_deploy flavor standalone - - iniset_rpc_backend heat $HEAT_API_CFN_CONF DEFAULT + # keystone authtoken + iniset $HEAT_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $HEAT_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $HEAT_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $HEAT_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $HEAT_CONF keystone_authtoken admin_user heat + iniset $HEAT_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $HEAT_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cfn + + # ec2authtoken + iniset $HEAT_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_CONF ec2authtoken keystone_ec2_uri 
$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens + + # paste_deploy + [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_CONF paste_deploy flavor standalone # OpenStack API - HEAT_API_CONF=$HEAT_CONF_DIR/heat-api.conf - cp $HEAT_DIR/etc/heat/heat-api.conf $HEAT_API_CONF - iniset $HEAT_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - inicomment $HEAT_API_CONF DEFAULT log_file - iniset $HEAT_API_CONF DEFAULT use_syslog $SYSLOG - iniset $HEAT_API_CONF DEFAULT bind_host $HEAT_API_HOST - iniset $HEAT_API_CONF DEFAULT bind_port $HEAT_API_PORT - iniset $HEAT_API_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $HEAT_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $HEAT_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $HEAT_API_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $HEAT_API_CONF keystone_authtoken admin_user heat - iniset $HEAT_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $HEAT_API_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api - iniset $HEAT_API_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens - [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_API_CONF paste_deploy flavor standalone - iniset_rpc_backend heat $HEAT_API_CONF DEFAULT - - - # engine - HEAT_ENGINE_CONF=$HEAT_CONF_DIR/heat-engine.conf - cp $HEAT_DIR/etc/heat/heat-engine.conf $HEAT_ENGINE_CONF - iniset $HEAT_ENGINE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - inicomment $HEAT_ENGINE_CONF DEFAULT log_file - iniset $HEAT_ENGINE_CONF DEFAULT use_syslog $SYSLOG - iniset $HEAT_ENGINE_CONF DEFAULT bind_host 
$HEAT_ENGINE_HOST - iniset $HEAT_ENGINE_CONF DEFAULT bind_port $HEAT_ENGINE_PORT - iniset $HEAT_ENGINE_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT - iniset $HEAT_ENGINE_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition - iniset $HEAT_ENGINE_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT - iniset $HEAT_ENGINE_CONF DEFAULT sql_connection `database_connection_url heat` - iniset $HEAT_ENGINE_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/random` - - iniset_rpc_backend heat $HEAT_ENGINE_CONF DEFAULT + iniset $HEAT_CONF heat_api bind_host $HEAT_API_HOST + iniset $HEAT_CONF heat_api bind_port $HEAT_API_PORT - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - # Add color to logging output - iniset $HEAT_ENGINE_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" - iniset $HEAT_ENGINE_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" - iniset $HEAT_ENGINE_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - iniset $HEAT_ENGINE_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" - fi + # Cloudformation API + iniset $HEAT_CONF heat_api_cfn bind_host $HEAT_API_CFN_HOST + iniset $HEAT_CONF heat_api_cfn bind_port $HEAT_API_CFN_PORT # Cloudwatch API - HEAT_API_CW_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch.conf - cp $HEAT_DIR/etc/heat/heat-api-cloudwatch.conf $HEAT_API_CW_CONF - iniset $HEAT_API_CW_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - inicomment $HEAT_API_CW_CONF DEFAULT log_file - iniset $HEAT_API_CW_CONF DEFAULT use_syslog $SYSLOG - iniset $HEAT_API_CW_CONF DEFAULT bind_host 
$HEAT_API_CW_HOST - iniset $HEAT_API_CW_CONF DEFAULT bind_port $HEAT_API_CW_PORT - iniset $HEAT_API_CW_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $HEAT_API_CW_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $HEAT_API_CW_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $HEAT_API_CW_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_CW_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $HEAT_API_CW_CONF keystone_authtoken admin_user heat - iniset $HEAT_API_CW_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $HEAT_API_CW_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cloudwatch - iniset $HEAT_API_CW_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_CW_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens - [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_API_CW_CONF paste_deploy flavor standalone - - iniset_rpc_backend heat $HEAT_API_CW_CONF DEFAULT + iniset $HEAT_CONF heat_api_cloudwatch bind_host $HEAT_API_CW_HOST + iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT # heat environment sudo mkdir -p $HEAT_ENV_DIR @@ -207,10 +169,10 @@ function install_heat() { # start_heat() - Start running processes, including screen function start_heat() { - screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF_DIR/heat-engine.conf" - screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-dir=$HEAT_CONF_DIR/heat-api.conf" - screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-dir=$HEAT_CONF_DIR/heat-api-cfn.conf" - screen_it h-api-cw "cd $HEAT_DIR; bin/heat-api-cloudwatch --config-dir=$HEAT_CONF_DIR/heat-api-cloudwatch.conf" + screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF" + screen_it 
h-api "cd $HEAT_DIR; bin/heat-api --config-file=$HEAT_CONF" + screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-file=$HEAT_CONF" + screen_it h-api-cw "cd $HEAT_DIR; bin/heat-api-cloudwatch --config-file=$HEAT_CONF" } # stop_heat() - Stop running processes From 97621a1d1f39a944a24371fc9f2bf9b86faec248 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Fri, 30 Aug 2013 13:12:17 +0100 Subject: [PATCH 0053/4438] xenapi: add username to vncviewer command Devstack prints out an instruction, how to look at the virtual machine's console. The command did not include the username, so if the user had a config file to use a different username for that network, the command failed. Change-Id: I5dd49169c45e26e8d2bb3d5920a1b7fa584be50f --- tools/xen/install_os_domU.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 997644d018..a012a08561 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -181,7 +181,7 @@ function wait_for_VM_to_halt() { mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.') domid=$(xe vm-list name-label="$GUEST_NAME" params=dom-id minimal=true) port=$(xenstore-read /local/domain/$domid/console/vnc-port) - echo "vncviewer -via $mgmt_ip localhost:${port:2}" + echo "vncviewer -via root@$mgmt_ip localhost:${port:2}" while true do state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted) From 1533a349da34a002ab6a09cee86d47daf6d777fb Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Fri, 30 Aug 2013 14:10:52 +0100 Subject: [PATCH 0054/4438] remove multi-host timeout If you ran exercises with MULTI_HOST enabled, an additional sleep was performed. This change removes that sleep to speed up tests. 
Change-Id: I9dfd61cbb9415bd5e8fd1e40f4e41512be2ae0d2 --- functions | 1 - 1 file changed, 1 deletion(-) diff --git a/functions b/functions index f24cc89e82..e0d0e2a70f 100644 --- a/functions +++ b/functions @@ -1454,7 +1454,6 @@ function _ping_check_novanet() { local check_command="" MULTI_HOST=`trueorfalse False $MULTI_HOST` if [[ "$MULTI_HOST" = "True" && "$from_net" = "$PRIVATE_NETWORK_NAME" ]]; then - sleep $boot_timeout return fi if [[ "$expected" = "True" ]]; then From 16ed068db52516238b618408656fa0bc612b9218 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Fri, 30 Aug 2013 13:28:31 +0100 Subject: [PATCH 0055/4438] xenapi: Set VM memory before starting it If someone was re-using an existing template, for a memory change, he needed to re-install the vm. This change sets the osdomu mem before starting it, so it doesn't matter how much memory the VM had according to the template. It also removes the memory manipulation bits from install-os-vpx.sh. Change-Id: Iad85f573b90c23140012c20c552a17277d9c97a0 --- tools/xen/functions | 19 +++++++++++++++++++ tools/xen/install_os_domU.sh | 10 ++++++++-- tools/xen/scripts/install-os-vpx.sh | 28 +--------------------------- 3 files changed, 28 insertions(+), 29 deletions(-) diff --git a/tools/xen/functions b/tools/xen/functions index 7616a5fd4d..a5c4b70bc3 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -268,3 +268,22 @@ function attach_network() { xe network-attach uuid=$net host-uuid=$host } + +function set_vm_memory() { + local vm_name_label + local memory + + vm_name_label="$1" + memory="$2" + + local vm + + vm=$(_vm_uuid "$vm_name_label") + + xe vm-memory-limits-set \ + static-min=${memory}MiB \ + static-max=${memory}MiB \ + dynamic-min=${memory}MiB \ + dynamic-max=${memory}MiB \ + uuid=$vm +} diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 997644d018..dc7959ad79 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -228,8 +228,11 @@ if [ -z 
"$templateuuid" ]; then $THIS_DIR/scripts/install-os-vpx.sh \ -t "$UBUNTU_INST_TEMPLATE_NAME" \ -n "$UBUNTU_INST_BRIDGE_OR_NET_NAME" \ - -l "$GUEST_NAME" \ - -r "$OSDOMU_MEM_MB" + -l "$GUEST_NAME" + + set_vm_memory "$GUEST_NAME" "$OSDOMU_MEM_MB" + + xe vm-start vm="$GUEST_NAME" # wait for install to finish wait_for_VM_to_halt @@ -255,6 +258,9 @@ fi # Install XenServer tools, and other such things $THIS_DIR/prepare_guest_template.sh "$GUEST_NAME" +# Set virtual machine parameters +set_vm_memory "$GUEST_NAME" "$OSDOMU_MEM_MB" + # start the VM to run the prepare steps xe vm-start vm="$GUEST_NAME" diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index 8ee8b675a9..c94a593e3d 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -20,8 +20,6 @@ set -eux BRIDGE= -RAM= -BALLOONING= NAME_LABEL= TEMPLATE_NAME= @@ -29,7 +27,7 @@ usage() { cat << EOF - Usage: $0 -t TEMPLATE_NW_INSTALL -l NAME_LABEL [-n BRIDGE] [-r RAM] [-b] + Usage: $0 -t TEMPLATE_NW_INSTALL -l NAME_LABEL [-n BRIDGE] Install a VM from a template @@ -37,9 +35,6 @@ cat << EOF -h Shows this message. -t template VM template to use - -b Enable memory ballooning. When set min_RAM=RAM/2 max_RAM=RAM. - -r MiB Specifies RAM used by the VPX, in MiB. - By default it will take the value from the XVA. -l name Specifies the name label for the VM. -n bridge The bridge/network to use for eth0. Defaults to xenbr0 EOF @@ -53,12 +48,6 @@ get_params() h) usage exit 1 ;; - b) - BALLOONING=1 - ;; - r) - RAM=$OPTARG - ;; n) BRIDGE=$OPTARG ;; @@ -119,19 +108,6 @@ create_vif() } -set_memory() -{ - local v="$1" - if [ "$RAM" != "" ] - then - echo "Setting RAM to $RAM MiB." - [ "$BALLOONING" == 1 ] && RAM_MIN=$(($RAM / 2)) || RAM_MIN=$RAM - xe vm-memory-limits-set static-min=16MiB static-max=${RAM}MiB \ - dynamic-min=${RAM_MIN}MiB dynamic-max=${RAM}MiB \ - uuid="$v" - fi -} - # Make the VM auto-start on server boot. 
set_auto_start() @@ -161,5 +137,3 @@ set_auto_start "$vm_uuid" create_vif "$vm_uuid" xe vm-param-set other-config:os-vpx=true uuid="$vm_uuid" xe vm-param-set actions-after-reboot=Destroy uuid="$vm_uuid" -set_memory "$vm_uuid" -xe vm-start uuid=$vm_uuid From bee5c50766698baa87f5e049734708436766777b Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Fri, 30 Aug 2013 13:48:08 -0400 Subject: [PATCH 0056/4438] Disable neutron quotas when using fake virt driver Nova's fake virt driver, can be used to do scale testing, so when using it disable neutron's quota limits. Change-Id: I9ce995079af04202179820777217ef294df71226 --- lib/neutron | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/neutron b/lib/neutron index 31876dee88..bf04840e47 100644 --- a/lib/neutron +++ b/lib/neutron @@ -507,6 +507,15 @@ function _configure_neutron_common() { done fi + if [ "$VIRT_DRIVER" = 'fake' ]; then + # Disable arbitrary limits + iniset $NEUTRON_CONF quotas quota_network -1 + iniset $NEUTRON_CONF quotas quota_subnet -1 + iniset $NEUTRON_CONF quotas quota_port -1 + iniset $NEUTRON_CONF quotas quota_security_group -1 + iniset $NEUTRON_CONF quotas quota_security_group_rule -1 + fi + _neutron_setup_rootwrap } From 49ba22460bfc7932f061e7c2a100d73c8781d48b Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 9 Aug 2013 19:51:20 -0500 Subject: [PATCH 0057/4438] Move RHEL6 hacks to tools/fixup_stuff.sh Change-Id: Ice983bc16379bc2bc25659c37cfc16b63fdfc34b --- stack.sh | 58 ---------------------------------- tools/fixup_stuff.sh | 75 +++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 74 insertions(+), 59 deletions(-) diff --git a/stack.sh b/stack.sh index 8f59328792..df3cc4ed36 100755 --- a/stack.sh +++ b/stack.sh @@ -589,64 +589,6 @@ $TOP_DIR/tools/install_pip.sh # Do the ugly hacks for borken packages and distros $TOP_DIR/tools/fixup_stuff.sh - -# System-specific preconfigure -# ============================ - -if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then - # Disable selinux 
to avoid configuring to allow Apache access - # to Horizon files or run nodejs (LP#1175444) - if selinuxenabled; then - sudo setenforce 0 - fi - - # The following workarounds break xenserver - if [ "$VIRT_DRIVER" != 'xenserver' ]; then - # An old version of ``python-crypto`` (2.0.1) may be installed on a - # fresh system via Anaconda and the dependency chain - # ``cas`` -> ``python-paramiko`` -> ``python-crypto``. - # ``pip uninstall pycrypto`` will remove the packaged ``.egg-info`` - # file but leave most of the actual library files behind in - # ``/usr/lib64/python2.6/Crypto``. Later ``pip install pycrypto`` - # will install over the packaged files resulting - # in a useless mess of old, rpm-packaged files and pip-installed files. - # Remove the package so that ``pip install python-crypto`` installs - # cleanly. - # Note: other RPM packages may require ``python-crypto`` as well. - # For example, RHEL6 does not install ``python-paramiko packages``. - uninstall_package python-crypto - - # A similar situation occurs with ``python-lxml``, which is required by - # ``ipa-client``, an auditing package we don't care about. The - # build-dependencies needed for ``pip install lxml`` (``gcc``, - # ``libxml2-dev`` and ``libxslt-dev``) are present in - # ``files/rpms/general``. - uninstall_package python-lxml - fi - - # If the ``dbus`` package was installed by DevStack dependencies the - # uuid may not be generated because the service was never started (PR#598200), - # causing Nova to stop later on complaining that ``/var/lib/dbus/machine-id`` - # does not exist. - sudo service messagebus restart - - # ``setup.py`` contains a ``setup_requires`` package that is supposed - # to be transient. However, RHEL6 distribute has a bug where - # ``setup_requires`` registers entry points that are not cleaned - # out properly after the setup-phase resulting in installation failures - # (bz#924038). 
Pre-install the problem package so the ``setup_requires`` - # dependency is satisfied and it will not be installed transiently. - # Note we do this before the track-depends below. - pip_install hgtools - - # RHEL6's version of ``python-nose`` is incompatible with Tempest. - # Install nose 1.1 (Tempest-compatible) from EPEL - install_package python-nose1.1 - # Add a symlink for the new nosetests to allow tox for Tempest to - # work unmolested. - sudo ln -sf /usr/bin/nosetests1.1 /usr/local/bin/nosetests -fi - install_rpc_backend if is_service_enabled $DATABASE_BACKENDS; then diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 60d0f468e0..371b25fc8f 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -9,10 +9,17 @@ # pip 1.4 doesn't fix it (1.3 did) # - httplib2 0.8 permissions are 600 in the package and # pip 1.4 doesn't fix it (1.3 did) +# - RHEL6: +# - set selinux not enforcing +# - (re)start messagebus daemon +# - remove distro packages python-crypto and python-lxml +# - pre-install hgtools to work around a bug in RHEL6 distribute +# - install nose 1.1 from EPEL + # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) # Change dir to top of devstack cd $TOP_DIR @@ -22,6 +29,10 @@ source $TOP_DIR/functions FILES=$TOP_DIR/files + +# Python Packages +# --------------- + # Pre-install affected packages so we can fix the permissions sudo pip install prettytable sudo pip install httplib2 @@ -41,3 +52,65 @@ for dir in $SITE_DIRS; do fi done + + +# RHEL6 +# ----- + +if [[ $DISTRO =~ (rhel6) ]]; then + + # Disable selinux to avoid configuring to allow Apache access + # to Horizon files or run nodejs (LP#1175444) + # FIXME(dtroyer): see if this can be skipped without node or if Horizon is not enabled + if selinuxenabled; then + sudo setenforce 0 + fi + + # If the ``dbus`` package was installed by DevStack dependencies the + # uuid may not be generated because the 
service was never started (PR#598200), + # causing Nova to stop later on complaining that ``/var/lib/dbus/machine-id`` + # does not exist. + sudo service messagebus restart + + # The following workarounds break xenserver + if [ "$VIRT_DRIVER" != 'xenserver' ]; then + # An old version of ``python-crypto`` (2.0.1) may be installed on a + # fresh system via Anaconda and the dependency chain + # ``cas`` -> ``python-paramiko`` -> ``python-crypto``. + # ``pip uninstall pycrypto`` will remove the packaged ``.egg-info`` + # file but leave most of the actual library files behind in + # ``/usr/lib64/python2.6/Crypto``. Later ``pip install pycrypto`` + # will install over the packaged files resulting + # in a useless mess of old, rpm-packaged files and pip-installed files. + # Remove the package so that ``pip install python-crypto`` installs + # cleanly. + # Note: other RPM packages may require ``python-crypto`` as well. + # For example, RHEL6 does not install ``python-paramiko packages``. + uninstall_package python-crypto + + # A similar situation occurs with ``python-lxml``, which is required by + # ``ipa-client``, an auditing package we don't care about. The + # build-dependencies needed for ``pip install lxml`` (``gcc``, + # ``libxml2-dev`` and ``libxslt-dev``) are present in + # ``files/rpms/general``. + uninstall_package python-lxml + fi + + # ``setup.py`` contains a ``setup_requires`` package that is supposed + # to be transient. However, RHEL6 distribute has a bug where + # ``setup_requires`` registers entry points that are not cleaned + # out properly after the setup-phase resulting in installation failures + # (bz#924038). Pre-install the problem package so the ``setup_requires`` + # dependency is satisfied and it will not be installed transiently. + # Note we do this before the track-depends in ``stack.sh``. + pip_install hgtools + + + # RHEL6's version of ``python-nose`` is incompatible with Tempest. 
+ # Install nose 1.1 (Tempest-compatible) from EPEL + install_package python-nose1.1 + # Add a symlink for the new nosetests to allow tox for Tempest to + # work unmolested. + sudo ln -sf /usr/bin/nosetests1.1 /usr/local/bin/nosetests + +fi From 4728001d014a38409aabf639fc9a06024342321a Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Sat, 31 Aug 2013 12:12:46 +0100 Subject: [PATCH 0058/4438] xenapi: Increase default OS domU memory to 2G In XenServer scenarios, an additional domU is created to run OpenStack services. This change is increasing the memory for that VM to speed up test runs. Change-Id: I322f4e4703e506620fa7e7456c4264ee0d050edc --- tools/xen/xenrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/xenrc b/tools/xen/xenrc index 03b30ac55e..f698be1085 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -13,7 +13,7 @@ CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false} # Size of image VDI_MB=${VDI_MB:-5000} -OSDOMU_MEM_MB=1024 +OSDOMU_MEM_MB=2048 OSDOMU_VDI_GB=8 # Network mapping. Specify bridge names or network names. Network names may From bbf56237747cace1f4c8f393893239488b9a344f Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Fri, 30 Aug 2013 12:40:18 +0100 Subject: [PATCH 0059/4438] xenapi: README.md and embedded localrc updates This change updates the readme, so that it is easier to get started, and reflects the actual behavior of DevStack. 
Changes in README.md: - Link to xenserver download page - Remove neutron interface - it is no longer installed by devstack - Add appendix with - How to use a different ubuntu mirror - How to use a proxy for ubuntu - How to re-use the created VM - Remove run from snapshot section and "do cloudy stuff" Changes in the Readme-embedded sample localrc: - Upload a vhd image and a uec image by default - easier to get started Change-Id: I13bb8e59ff5367ff7623fe9aa273886a957f81a7 --- tools/xen/README.md | 131 +++++++++++++++++++++++++++++++------------- 1 file changed, 94 insertions(+), 37 deletions(-) diff --git a/tools/xen/README.md b/tools/xen/README.md index af54d729b1..06192ed2b7 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -1,48 +1,54 @@ -# Getting Started With XenServer 5.6 and Devstack -The purpose of the code in this directory it to help developers bootstrap -a XenServer 5.6 (or greater) + Openstack development environment. This file gives -some pointers on how to get started. +# Getting Started With XenServer and Devstack -Xenserver is a Type 1 hypervisor, so it needs to be installed on bare metal. -The Openstack services are configured to run within a "privileged" virtual -machine on the Xenserver host (called OS domU). The VM uses the XAPI toolstack -to communicate with the host. +The purpose of the code in this directory is to help developers bootstrap a +XenServer 6.2 (older versions may also work) + Openstack development +environment. This file gives some pointers on how to get started. + +Xenserver is a Type 1 hypervisor, so it is best installed on bare metal. The +Openstack services are configured to run within a virtual machine (called OS +domU) on the XenServer host. The VM uses the XAPI toolstack to communicate with +the host over a network connection (see `MGT_BRIDGE_OR_NET_NAME`). The provided localrc helps to build a basic environment. 
-The requirements are: + +## Introduction + +### Requirements + - An internet-enabled network with a DHCP server on it - XenServer box plugged in to the same network This network will be used as the OpenStack management network. The VM Network and the Public Network will not be connected to any physical interfaces, only new virtual networks will be created by the `install_os_domU.sh` script. -Steps to follow: +### Steps to follow + - Install XenServer - Download Devstack to XenServer - Customise `localrc` - Start `install_os_domU.sh` script +### Brief explanation + The `install_os_domU.sh` script will: - Setup XenAPI plugins - Create the named networks, if they don't exist - - Preseed-Netinstall an Ubuntu Virtual Machine, with 1 network interface: - - eth0 - Connected to `UBUNTU_INST_BRIDGE_OR_NET_NAME`, defaults to - `MGT_BRIDGE_OR_NET_NAME` + - Preseed-Netinstall an Ubuntu Virtual Machine (NOTE: you can save and reuse + it, see [Reuse the Ubuntu VM](#reuse-the-ubuntu-vm)), with 1 network + interface: + - `eth0` - Connected to `UBUNTU_INST_BRIDGE_OR_NET_NAME`, defaults to + `MGT_BRIDGE_OR_NET_NAME` - After the Ubuntu install process finished, the network configuration is modified to: - - eth0 - Management interface, connected to `MGT_BRIDGE_OR_NET_NAME` - - eth1 - VM interface, connected to `VM_BRIDGE_OR_NET_NAME` - - eth2 - Public interface, connected to `PUB_BRIDGE_OR_NET_NAME` - - (eth3) - Optional network interface if neutron is used, to enforce xapi to - create the underlying bridge. + - `eth0` - Management interface, connected to `MGT_BRIDGE_OR_NET_NAME`. Xapi + must be accessible through this network. + - `eth1` - VM interface, connected to `VM_BRIDGE_OR_NET_NAME` + - `eth2` - Public interface, connected to `PUB_BRIDGE_OR_NET_NAME` - Start devstack inside the created OpenStack VM ## Step 1: Install Xenserver -Install XenServer 5.6+ on a clean box. 
You can get XenServer by signing -up for an account on citrix.com, and then visiting: -https://www.citrix.com/English/ss/downloads/details.asp?downloadId=2311504&productId=683148 - -For details on installation, see: http://wiki.openstack.org/XenServer/Install +Install XenServer on a clean box. You can download the latest XenServer for +free from: http://www.xenserver.org/ The XenServer IP configuration depends on your local network setup. If you are using dhcp, make a reservation for XenServer, so its IP address won't change @@ -85,17 +91,20 @@ Of course, use real passwords if this machine is exposed. XENAPI_CONNECTION_URL="http://address_of_your_xenserver" VNCSERVER_PROXYCLIENT_ADDRESS=address_of_your_xenserver - # Do not download the usual images - IMAGE_URLS="" - # Explicitly set virt driver here + # Download a vhd and a uec image + IMAGE_URLS="\ + https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz,\ + http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz" + + # Explicitly set virt driver VIRT_DRIVER=xenserver - # Explicitly enable multi-host + + # Explicitly enable multi-host for nova-network HA MULTI_HOST=1 + # Give extra time for boot ACTIVE_TIMEOUT=45 - # NOTE: the value of FLAT_NETWORK_BRIDGE will automatically be determined - # by install_os_domU.sh script. EOF ## Step 4: Run `./install_os_domU.sh` from the `tools/xen` directory @@ -107,12 +116,60 @@ Once this script finishes executing, log into the VM (openstack domU) that it installed and tail the run.sh.log file. You will need to wait until it run.sh has finished executing. -## Step 5: Do cloudy stuff! -* Play with horizon -* Play with the CLI -* Log bugs to devstack and core projects, and submit fixes! +# Appendix + +This section contains useful information for running devstack in CI +environments / using ubuntu network mirrors. + +## Use a specific Ubuntu mirror for installation + +To speed up the Ubuntu installation, you can use a specific mirror. 
To specify +a mirror explicitly, include the following settings in your `localrc` file: + + UBUNTU_INST_HTTP_HOSTNAME="archive.ubuntu.com" + UBUNTU_INST_HTTP_DIRECTORY="/ubuntu" + +These variables set the `mirror/http/hostname` and `mirror/http/directory` +settings in the ubuntu preseed file. The minimal ubuntu VM will use the +specified parameters. + +## Use an http proxy to speed up Ubuntu installation + +To further speed up the Ubuntu VM and package installation, an internal http +proxy could be used. `squid-deb-proxy` has prooven to be stable. To use an http +proxy, specify: + + UBUNTU_INST_HTTP_PROXY="http://ubuntu-proxy.somedomain.com:8000" + +in your `localrc` file. + +## Reuse the Ubuntu VM + +Performing a minimal ubuntu installation could take a lot of time, depending on +your mirror/network speed. If you run `install_os_domU.sh` script on a clean +hypervisor, you can speed up the installation, by re-using the ubuntu vm from +a previous installation. + +### Export the Ubuntu VM to an XVA + +Given you have an nfs export `TEMPLATE_NFS_DIR`: + + TEMPLATE_FILENAME=devstack-jeos.xva + TEMPLATE_NAME=jeos_template_for_devstack + mountdir=$(mktemp -d) + mount -t nfs "$TEMPLATE_NFS_DIR" "$mountdir" + VM="$(xe template-list name-label="$TEMPLATE_NAME" --minimal)" + xe template-export template-uuid=$VM filename="$mountdir/$TEMPLATE_FILENAME" + umount "$mountdir" + rm -rf "$mountdir" + +### Import the Ubuntu VM + +Given you have an nfs export `TEMPLATE_NFS_DIR` where you exported the Ubuntu +VM as `TEMPLATE_FILENAME`: -## Step 6: Run from snapshot -If you want to quicky re-run devstack from a clean state, -using the same settings you used in your previous run, -you can revert the DomU to the snapshot called `before_first_boot` + mountdir=$(mktemp -d) + mount -t nfs "$TEMPLATE_NFS_DIR" "$mountdir" + xe vm-import filename="$mountdir/$TEMPLATE_FILENAME" + umount "$mountdir" + rm -rf "$mountdir" From f39ee96f1dc7fae9aaad194669467a7f9fcc3d31 Mon Sep 17 00:00:00 2001 From: 
Jorge Valderrama Romero Date: Mon, 2 Sep 2013 17:18:40 +0200 Subject: [PATCH 0060/4438] Fix deprecated params user_id, role_id, tenant_id Update keystone cli by changing parameter options (user_id, role_id and tenant_id) to use '-' rather than '_' in the method user_role_add and consequently the method user_role_remove without maintain backward compatibility because these are deprecated. python-keystoneclient - Bug #1150434 Change-Id: Ia5113718eb050cf7dba443b8d0caf3bdaa1730f0 --- files/keystone_data.sh | 36 ++++++++++++++++++------------------ lib/cinder | 6 +++--- lib/keystone | 14 +++++++------- lib/neutron | 6 +++--- lib/nova | 6 +++--- lib/swift | 8 ++++---- 6 files changed, 38 insertions(+), 38 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 45f9c8165c..3f3137cb14 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -58,9 +58,9 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" | # Nova needs ResellerAdmin role to download images when accessing # swift through the s3 api. 
keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ - --user_id $NOVA_USER \ - --role_id $RESELLER_ROLE + --tenant-id $SERVICE_TENANT \ + --user-id $NOVA_USER \ + --role-id $RESELLER_ROLE fi # Heat @@ -69,9 +69,9 @@ if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then --pass="$SERVICE_PASSWORD" \ --tenant_id $SERVICE_TENANT \ --email=heat@example.com) - keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user_id $HEAT_USER \ - --role_id $SERVICE_ROLE + keystone user-role-add --tenant-id $SERVICE_TENANT \ + --user-id $HEAT_USER \ + --role-id $SERVICE_ROLE # heat_stack_user role is for users created by Heat keystone role-create --name heat_stack_user if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then @@ -106,9 +106,9 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then --tenant_id $SERVICE_TENANT \ --email=glance@example.com) keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ - --user_id $GLANCE_USER \ - --role_id $ADMIN_ROLE + --tenant-id $SERVICE_TENANT \ + --user-id $GLANCE_USER \ + --role-id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then GLANCE_SERVICE=$(get_id keystone service-create \ --name=glance \ @@ -129,13 +129,13 @@ if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then --pass="$SERVICE_PASSWORD" \ --tenant_id $SERVICE_TENANT \ --email=ceilometer@example.com) - keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user_id $CEILOMETER_USER \ - --role_id $ADMIN_ROLE + keystone user-role-add --tenant-id $SERVICE_TENANT \ + --user-id $CEILOMETER_USER \ + --role-id $ADMIN_ROLE # Ceilometer needs ResellerAdmin role to access swift account stats. 
- keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user_id $CEILOMETER_USER \ - --role_id $RESELLER_ROLE + keystone user-role-add --tenant-id $SERVICE_TENANT \ + --user-id $CEILOMETER_USER \ + --role-id $RESELLER_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then CEILOMETER_SERVICE=$(get_id keystone service-create \ --name=ceilometer \ @@ -192,7 +192,7 @@ if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then --pass="$ADMIN_PASSWORD" \ --email=alt_demo@example.com) keystone user-role-add \ - --tenant_id $ALT_DEMO_TENANT \ - --user_id $ALT_DEMO_USER \ - --role_id $MEMBER_ROLE + --tenant-id $ALT_DEMO_TENANT \ + --user-id $ALT_DEMO_USER \ + --role-id $MEMBER_ROLE fi diff --git a/lib/cinder b/lib/cinder index 826b9586da..b30829f6de 100644 --- a/lib/cinder +++ b/lib/cinder @@ -335,9 +335,9 @@ create_cinder_accounts() { --email=cinder@example.com \ | grep " id " | get_field 2) keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ - --user_id $CINDER_USER \ - --role_id $ADMIN_ROLE + --tenant-id $SERVICE_TENANT \ + --user-id $CINDER_USER \ + --role-id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then CINDER_SERVICE=$(keystone service-create \ --name=cinder \ diff --git a/lib/keystone b/lib/keystone index 0a35dd5d80..535710f52b 100644 --- a/lib/keystone +++ b/lib/keystone @@ -217,9 +217,9 @@ create_keystone_accounts() { --name admin \ | grep " id " | get_field 2) keystone user-role-add \ - --user_id $ADMIN_USER \ - --role_id $ADMIN_ROLE \ - --tenant_id $ADMIN_TENANT + --user-id $ADMIN_USER \ + --role-id $ADMIN_ROLE \ + --tenant-id $ADMIN_TENANT # service SERVICE_TENANT=$(keystone tenant-create \ @@ -244,10 +244,10 @@ create_keystone_accounts() { --pass "$ADMIN_PASSWORD" \ --email demo@example.com \ | grep " id " | get_field 2) - keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $DEMO_TENANT - keystone user-role-add --user_id $ADMIN_USER --role_id $ADMIN_ROLE --tenant_id $DEMO_TENANT - keystone user-role-add --user_id 
$DEMO_USER --role_id $ANOTHER_ROLE --tenant_id $DEMO_TENANT - keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $INVIS_TENANT + keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $DEMO_TENANT + keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $DEMO_TENANT + keystone user-role-add --user-id $DEMO_USER --role-id $ANOTHER_ROLE --tenant-id $DEMO_TENANT + keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $INVIS_TENANT # Keystone if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then diff --git a/lib/neutron b/lib/neutron index 01fe3eafba..f6c2377dbb 100644 --- a/lib/neutron +++ b/lib/neutron @@ -301,9 +301,9 @@ function create_neutron_accounts() { --email=neutron@example.com \ | grep " id " | get_field 2) keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ - --user_id $NEUTRON_USER \ - --role_id $ADMIN_ROLE + --tenant-id $SERVICE_TENANT \ + --user-id $NEUTRON_USER \ + --role-id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then NEUTRON_SERVICE=$(keystone service-create \ --name=neutron \ diff --git a/lib/nova b/lib/nova index 0b65f84366..19093adc3a 100644 --- a/lib/nova +++ b/lib/nova @@ -399,9 +399,9 @@ create_nova_accounts() { --email=nova@example.com \ | grep " id " | get_field 2) keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ - --user_id $NOVA_USER \ - --role_id $ADMIN_ROLE + --tenant-id $SERVICE_TENANT \ + --user-id $NOVA_USER \ + --role-id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then NOVA_SERVICE=$(keystone service-create \ --name=nova \ diff --git a/lib/swift b/lib/swift index 8e641521a0..f72beafef7 100644 --- a/lib/swift +++ b/lib/swift @@ -464,7 +464,7 @@ function create_swift_accounts() { SWIFT_USER=$(keystone user-create --name=swift --pass="$SERVICE_PASSWORD" \ --tenant_id $SERVICE_TENANT --email=swift@example.com | grep " id " | get_field 2) - keystone user-role-add --tenant_id $SERVICE_TENANT --user_id 
$SWIFT_USER --role_id $ADMIN_ROLE + keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $SWIFT_USER --role-id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then SWIFT_SERVICE=$(keystone service-create --name=swift --type="object-store" \ @@ -479,14 +479,14 @@ function create_swift_accounts() { SWIFT_TENANT_TEST1=$(keystone tenant-create --name=swifttenanttest1 | grep " id " | get_field 2) SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=testing --email=test@example.com | grep " id " | get_field 2) - keystone user-role-add --user_id $SWIFT_USER_TEST1 --role_id $ADMIN_ROLE --tenant_id $SWIFT_TENANT_TEST1 + keystone user-role-add --user-id $SWIFT_USER_TEST1 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST1 SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 --pass=testing3 --email=test3@example.com | grep " id " | get_field 2) - keystone user-role-add --user_id $SWIFT_USER_TEST3 --role_id $ANOTHER_ROLE --tenant_id $SWIFT_TENANT_TEST1 + keystone user-role-add --user-id $SWIFT_USER_TEST3 --role-id $ANOTHER_ROLE --tenant-id $SWIFT_TENANT_TEST1 SWIFT_TENANT_TEST2=$(keystone tenant-create --name=swifttenanttest2 | grep " id " | get_field 2) SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=testing2 --email=test2@example.com | grep " id " | get_field 2) - keystone user-role-add --user_id $SWIFT_USER_TEST2 --role_id $ADMIN_ROLE --tenant_id $SWIFT_TENANT_TEST2 + keystone user-role-add --user-id $SWIFT_USER_TEST2 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST2 } # init_swift() - Initialize rings From 533e14d6a5fc1ba3dbd24fb0075ef1eafd00a705 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 30 Aug 2013 15:11:22 -0500 Subject: [PATCH 0061/4438] Copy policy_add() from Grenade functions policy_all() was added to Grenade's functions file, which is notmally synced from DevStack so we need to bring it over here before the next sync. 
Change-Id: Ifd852e9d1ffe39fa23f6312d1ddf2874b5f2b9f0 --- functions | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/functions b/functions index f24cc89e82..14e817c999 100644 --- a/functions +++ b/functions @@ -1645,6 +1645,37 @@ vercmp_numbers() { } +# ``policy_add policy_file policy_name policy_permissions`` +# +# Add a policy to a policy.json file +# Do nothing if the policy already exists + +function policy_add() { + local policy_file=$1 + local policy_name=$2 + local policy_perm=$3 + + if grep -q ${policy_name} ${policy_file}; then + echo "Policy ${policy_name} already exists in ${policy_file}" + return + fi + + # Add a terminating comma to policy lines without one + # Remove the closing '}' and all lines following to the end-of-file + local tmpfile=$(mktemp) + uniq ${policy_file} | sed -e ' + s/]$/],/ + /^[}]/,$d + ' > ${tmpfile} + + # Append policy and closing brace + echo " \"${policy_name}\": ${policy_perm}" >>${tmpfile} + echo "}" >>${tmpfile} + + mv ${tmpfile} ${policy_file} +} + + # Restore xtrace $XTRACE From 4d0d5ce778d4fa79cdbe2e5532608060a95870e3 Mon Sep 17 00:00:00 2001 From: "Walter A. Boring IV" Date: Fri, 30 Aug 2013 12:39:42 -0700 Subject: [PATCH 0062/4438] Cinder needs iscsiadm available This patch adds the binary packages that contains the iscsiadm utility for cinder. Cinder uses the iscsiadm utility for various actions and it should be there for devstack users. 
Fixes bug #1219032 Change-Id: I8e1c6e2e5d4bfade50aba9259b6da3957d6d622d --- files/apts/cinder | 2 ++ files/rpms-suse/cinder | 1 + files/rpms/cinder | 1 + 3 files changed, 4 insertions(+) diff --git a/files/apts/cinder b/files/apts/cinder index 32cb3a0039..f8e3b6d06d 100644 --- a/files/apts/cinder +++ b/files/apts/cinder @@ -3,3 +3,5 @@ lvm2 qemu-utils libpq-dev python-dev +open-iscsi +open-iscsi-utils # Deprecated since quantal dist:lucid,oneiric,precise diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder index 49e2cb8249..55078da27c 100644 --- a/files/rpms-suse/cinder +++ b/files/rpms-suse/cinder @@ -3,3 +3,4 @@ tgt qemu-tools python-devel postgresql-devel +open-iscsi diff --git a/files/rpms/cinder b/files/rpms/cinder index 699f2fc22c..c4edb68f14 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -3,3 +3,4 @@ scsi-target-utils qemu-img python-devel postgresql-devel +iscsi-initiator-utils From 08df29bff4e5c9e717358e7593e8c5a9c51a26bf Mon Sep 17 00:00:00 2001 From: Nikolay Sobolevskiy Date: Fri, 30 Aug 2013 21:59:15 +0400 Subject: [PATCH 0063/4438] Add my_ip option in cinder.conf Then cinder installed in multinode environment with more than one interface, it's better to use CINDER_SERVICE_HOST option for cinder ip address. 
Change-Id: I775b70407379d9c97696f536f5f89cecb33657be --- lib/cinder | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/cinder b/lib/cinder index 826b9586da..aed3004370 100644 --- a/lib/cinder +++ b/lib/cinder @@ -226,6 +226,7 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT volume_group $VOLUME_GROUP iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s fi + iniset $CINDER_CONF DEFAULT my_ip "$CINDER_SERVICE_HOST" iniset $CINDER_CONF DEFAULT iscsi_helper tgtadm iniset $CINDER_CONF DEFAULT sql_connection `database_connection_url cinder` iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI From e2c4ee23642a00ebed0343ad2086b5c250f24516 Mon Sep 17 00:00:00 2001 From: sbauza Date: Thu, 29 Aug 2013 17:29:46 +0200 Subject: [PATCH 0064/4438] Fix Neutron issues related to Baremetal service When deploying devstack on a single host with a single NIC and baremetal and neutron services enabled, the host looses Internet access as default route is deleted. Also, if localrc is not correctly set with correct values, OVS ports and Neutron net and subnet aren't created (commands missing arguments), we need devstack to properly fail. Change-Id: I7f39bbdf7b8cb544b8b4a59effe16f04b85d1425 --- lib/neutron | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index dc3c622a7e..5664ff2cc5 100644 --- a/lib/neutron +++ b/lib/neutron @@ -327,6 +327,9 @@ function create_neutron_initial_network() { # Since neutron command is executed in admin context at this point, # ``--tenant_id`` needs to be specified. if is_baremetal; then + if [[ "$PUBLIC_INTERFACE" == '' || "$OVS_PHYSICAL_BRIDGE" == '' ]]; then + die $LINENO "Neutron settings for baremetal not set.. 
exiting" + fi sudo ovs-vsctl add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do sudo ip addr del $IP dev $PUBLIC_INTERFACE @@ -335,6 +338,7 @@ function create_neutron_initial_network() { NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2) SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) sudo ifconfig $OVS_PHYSICAL_BRIDGE up + sudo route add default gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE else NET_ID=$(neutron net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) @@ -493,7 +497,7 @@ function _configure_neutron_common() { # ``Q_PLUGIN_EXTRA_CONF_FILES=(file1, file2)`` neutron_plugin_configure_common - if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then + if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then die $LINENO "Neutron plugin not set.. exiting" fi From 1e3d318c861565ddc26746bed4818daee77e2f47 Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Thu, 15 Aug 2013 18:15:31 -0700 Subject: [PATCH 0065/4438] Support OpenSwan in Neturon VPNaaS Neutron VPNaaS chagned ipsec package for OpenSwan. This commit updates the package. 
Change-Id: I333501a405fbc552c575d26cfbac083646d05dfd --- lib/neutron_plugins/services/vpn | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn index 0a79a697ad..b8f5c7d56b 100644 --- a/lib/neutron_plugins/services/vpn +++ b/lib/neutron_plugins/services/vpn @@ -8,9 +8,10 @@ set +o xtrace AGENT_VPN_BINARY="$NEUTRON_BIN_DIR/neutron-vpn-agent" VPN_PLUGIN="neutron.services.vpn.plugin.VPNDriverPlugin" +IPSEC_PACKAGE=${IPSEC_PACKAGE:-"openswan"} function neutron_vpn_install_agent_packages() { - install_package strongswan + install_package $IPSEC_PACKAGE } function neutron_vpn_configure_common() { From b53bce1c262e59e1a39b8dd1d2cfcc2ab2e187ef Mon Sep 17 00:00:00 2001 From: fujioka yuuichi Date: Thu, 5 Sep 2013 19:08:50 +0900 Subject: [PATCH 0066/4438] Rename ceilometer alarm service name Rename service name "ceilometer-alarm-eval" to "ceilometer-alarm-singleton" and "ceilometer-alarm-notify" to ceilometer-alarm-notifier" in this patch. 
Change-Id: I6619cc02874f6f59c43ba2952325e9d0533e395d --- lib/ceilometer | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 8768122bab..53e98b86d7 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -5,7 +5,7 @@ # enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api # # To ensure Ceilometer alarming services are enabled also, further add to the localrc: -# enable_service ceilometer-alarm-notify ceilometer-alarm-eval +# enable_service ceilometer-alarm-notifier ceilometer-alarm-singleton # Dependencies: # - functions @@ -138,14 +138,14 @@ function start_ceilometer() { screen_it ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF" screen_it ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF" screen_it ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" - screen_it ceilometer-alarm-notify "ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" - screen_it ceilometer-alarm-eval "ceilometer-alarm-singleton --config-file $CEILOMETER_CONF" + screen_it ceilometer-alarm-notifier "ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" + screen_it ceilometer-alarm-singleton "ceilometer-alarm-singleton --config-file $CEILOMETER_CONF" } # stop_ceilometer() - Stop running processes function stop_ceilometer() { # Kill the ceilometer screen windows - for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api ceilometer-alarm-notify ceilometer-alarm-eval; do + for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-singleton; do screen -S $SCREEN_NAME -p $serv -X kill done } From 69f745748d4f2bcfd3e678037187bce1f8e53ccf Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 27 Aug 2013 11:43:53 +0200 Subject: [PATCH 0067/4438] Switch Ceilometer default backend to MySQL MongoDB 2.4 
not being available in Ubuntu cloud archive for a while now, and the catch up done by this driver allows me to think it might be a good idea to switch by default on SQL for now on devstack. We can add another job to have Ceilometer tested on MongoDB too later. Change-Id: I74c3c436d009fed898c5ae4ffb82763e9a337d90 --- lib/ceilometer | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ceilometer b/lib/ceilometer index 8768122bab..46e4624d49 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -43,7 +43,7 @@ CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer} CEILOMETER_BIN_DIR=$(get_python_exec_prefix) # Set up database backend -CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mongodb} +CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql} # Functions # --------- From 33b331875d9ec904419ad8da3f5bf66077e5d78c Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 5 Sep 2013 13:06:52 -0700 Subject: [PATCH 0068/4438] Enable multi-threaded nova API servers Add an additional worker thread for each nova service: compute, ec2 and metadata. Any real deployment will be using multiple API server threads, so lets make devstack test that scenario. With this patch we will have four worker threads for each type of API server. 
Change-Id: I09f4c6f57e71982b8c7fc92645b3ebec12ff1348 --- lib/nova | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/nova b/lib/nova index 32a51d3d4d..e2e1ea2835 100644 --- a/lib/nova +++ b/lib/nova @@ -444,6 +444,9 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT" iniset $NOVA_CONF DEFAULT osapi_compute_extension "nova.api.openstack.compute.contrib.standard_extensions" iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP" + iniset $NOVA_CONF DEFAULT osapi_compute_workers "4" + iniset $NOVA_CONF DEFAULT ec2_workers "4" + iniset $NOVA_CONF DEFAULT metadata_workers "4" iniset $NOVA_CONF DEFAULT sql_connection `database_connection_url nova` if is_baremetal; then iniset $NOVA_CONF baremetal sql_connection `database_connection_url nova_bm` From a6273b9378622318057c2f5f685022389a066818 Mon Sep 17 00:00:00 2001 From: Ryan Hsu Date: Wed, 4 Sep 2013 23:51:29 -0700 Subject: [PATCH 0069/4438] Provide a means of setting vmdk custom properties via image filename Custom properties for vmdk disk type, storage adapter type, and networking adapter type can now be retrieved from a vmdk image's filename. The filename format is defined as: -:: An example filename following this format would be debian-2.6.32-i646-thin:ide:VirtualE1000. If the vmdk filename does not match the above format then underlying nova driver will supply default values. 
Change-Id: I83483d20f984250bd8154d8e270b2e801d2df303 Closes-bug: #1221044 --- functions | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/functions b/functions index df8166a0e2..d14c973715 100644 --- a/functions +++ b/functions @@ -1256,7 +1256,25 @@ function upload_image() { if [[ "$image_url" =~ '.vmdk' ]]; then IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME="${IMAGE_FNAME%.vmdk}" - glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format bare --disk-format vmdk --property vmware-disktype="preallocated" < "${IMAGE}" + + # Before we can upload vmdk type images to glance, we need to know it's + # disk type, storage adapter, and networking adapter. These values are + # passed to glance as custom properties. We take these values from the + # vmdk filename, which is expected in the following format: + # + # -:: + # + # If the filename does not follow the above format then the vsphere + # driver will supply default values. + property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).+:.+:.+$'` + if [[ ! -z "$property_string" ]]; then + IFS=':' read -a props <<< "$property_string" + vmdk_disktype="${props[0]}" + vmdk_adapter_type="${props[1]}" + vmdk_net_adapter="${props[2]}" + fi + + glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format bare --disk-format vmdk --property vmware-disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${IMAGE}" return fi From e118655028bfb093c5dd0cde4d615a23a0abbc7c Mon Sep 17 00:00:00 2001 From: Angus Salkeld Date: Fri, 6 Sep 2013 13:35:09 +1000 Subject: [PATCH 0070/4438] Fix Heat's signing_dir This is not critical but looks odd using api-cfn. 
Change-Id: Ie0f5c66f635b4a7c6ba51581ad01bab624158e61 --- lib/heat | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/lib/heat b/lib/heat index 67509bcfa0..58505ab792 100644 --- a/lib/heat +++ b/lib/heat @@ -100,7 +100,7 @@ function configure_heat() { iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $HEAT_CONF keystone_authtoken admin_user heat iniset $HEAT_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $HEAT_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cfn + iniset $HEAT_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR # ec2authtoken iniset $HEAT_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 @@ -148,12 +148,8 @@ function init_heat() { # create_heat_cache_dir() - Part of the init_heat() process function create_heat_cache_dir() { # Create cache dirs - sudo mkdir -p $HEAT_AUTH_CACHE_DIR/api - sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR/api - sudo mkdir -p $HEAT_AUTH_CACHE_DIR/api-cfn - sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR/api-cfn - sudo mkdir -p $HEAT_AUTH_CACHE_DIR/api-cloudwatch - sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR/api-cloudwatch + sudo mkdir -p $HEAT_AUTH_CACHE_DIR + sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR } # install_heatclient() - Collect source and prepare From 5917868e75b0bd1a76bbf0e80eef50645e5b5c96 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Fri, 6 Sep 2013 16:14:17 +0200 Subject: [PATCH 0071/4438] Use 1.4.1 of pip. - This is where the option pip install --pre is. 
Change-Id: I3f836a701f17a4fea888ec51da62e7137cf0e6db --- tools/install_pip.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 6e3e9d2104..cb414a7168 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -25,7 +25,7 @@ FILES=$TOP_DIR/files # Handle arguments -INSTALL_PIP_VERSION=${INSTALL_PIP_VERSION:-"1.4"} +INSTALL_PIP_VERSION=${INSTALL_PIP_VERSION:-"1.4.1"} while [[ -n "$1" ]]; do case $1 in --force) From bc6324771b538ff9aee3ad44c4ca5ecdad402273 Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Fri, 6 Sep 2013 14:59:30 +0000 Subject: [PATCH 0072/4438] Default to linuxbridge and openvswitch drivers for ML2. Since the addition of ML2 port-binding, the linuxbridge and openvswitch drivers are required to be loaded when running with ML2. This small patch adds their loading into ML2 into devstack. Fixes bug 1220743 Change-Id: I97c5f4e0e4af59766e0084ed3b2dea2843cb33bf --- lib/neutron_plugins/ml2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 00bd716309..6ac20fe72e 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -20,7 +20,7 @@ Q_AGENT=${Q_AGENT:-openvswitch} source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent # List of MechanismDrivers to load -Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-} +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-openvswitch,linuxbridge} # List of Type Drivers to load Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,gre,vxlan} # Default GRE TypeDriver options From 74aad31c33b08f53681af07d79421970a106548f Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Wed, 28 Aug 2013 11:32:14 +0100 Subject: [PATCH 0073/4438] Missing pxelinux.0 on RedHat systems On a RedHat system the syslinux binaries are located in a different directory. 
Change-Id: I3b7a111e82e8845b6222c57fb2cfb725d9bb1dd7 --- lib/baremetal | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/lib/baremetal b/lib/baremetal index 8f6c3f1660..b591410638 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -215,7 +215,16 @@ function configure_baremetal_nova_dirs() { # ensure /tftpboot is prepared sudo mkdir -p /tftpboot sudo mkdir -p /tftpboot/pxelinux.cfg - sudo cp /usr/lib/syslinux/pxelinux.0 /tftpboot/ + + PXEBIN=/usr/share/syslinux/pxelinux.0 + if [ ! -f $PXEBIN ]; then + PXEBIN=/usr/lib/syslinux/pxelinux.0 + if [ ! -f $PXEBIN ]; then + die $LINENO "pxelinux.0 (from SYSLINUX) not found." + fi + fi + + sudo cp $PXEBIN /tftpboot/ sudo chown -R $STACK_USER:$LIBVIRT_GROUP /tftpboot # ensure $NOVA_STATE_PATH/baremetal is prepared From 35f0966d351c6cf4fe11c7bf482e1d9c02c7dac5 Mon Sep 17 00:00:00 2001 From: Ryu Ishimoto Date: Tue, 27 Aug 2013 18:32:00 +0900 Subject: [PATCH 0074/4438] Modify midonet plugin to support the latest MidoNet MidoNet has been upgraded and devstack needs to be updated to be compatible. This change is required to run the current version of MidoNet plugin with DevStack. Closes-Bug: #1222314 Change-Id: If3379b4d5da4e4fcf989ee7398b5952d71b68d5a --- lib/neutron_plugins/midonet | 10 ++++++---- lib/neutron_thirdparty/midonet | 16 ++++++---------- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index 4d343f5c91..0ad760b289 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -31,7 +31,12 @@ function neutron_plugin_configure_debug_command() { } function neutron_plugin_configure_dhcp_agent() { - die $LINENO "q-dhcp must not be executed with MidoNet plugin!" 
+ DHCP_DRIVER=${DHCP_DRIVER:-"neutron.plugins.midonet.agent.midonet_driver.DhcpNoOpDriver"} + DHCP_INTERFACE_DRIVER=${DHCP_INTEFACE_DRIVER:-"neutron.plugins.midonet.agent.midonet_driver.MidonetInterfaceDriver"} + iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_driver $DHCP_DRIVER + iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver $DHCP_INTERFACE_DRIVER + iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces True + iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True } function neutron_plugin_configure_l3_agent() { @@ -58,9 +63,6 @@ function neutron_plugin_configure_service() { if [[ "$MIDONET_PROVIDER_ROUTER_ID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $MIDONET_PROVIDER_ROUTER_ID fi - if [[ "$MIDONET_METADATA_ROUTER_ID" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE MIDONET metadata_router_id $MIDONET_METADATA_ROUTER_ID - fi } function neutron_plugin_setup_interface_driver() { diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet index b3c726fe93..7928bca31f 100644 --- a/lib/neutron_thirdparty/midonet +++ b/lib/neutron_thirdparty/midonet @@ -10,22 +10,20 @@ # MidoNet devstack destination dir MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet} +MIDONET_API_PORT=${MIDONET_API_PORT:-8080} +MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-api} # MidoNet client repo MIDONET_CLIENT_REPO=${MIDONET_CLIENT_REPO:-https://github.com/midokura/python-midonetclient.git} MIDONET_CLIENT_BRANCH=${MIDONET_CLIENT_BRANCH:-master} -MIDONET_CLIENT_DIR=$MIDONET_DIR/python-midonetclient +MIDONET_CLIENT_DIR=${MIDONET_CLIENT_DIR:-$MIDONET_DIR/python-midonetclient} # MidoNet OpenStack repo MIDONET_OS_REPO=${MIDONET_OS_REPO:-https://github.com/midokura/midonet-openstack.git} MIDONET_OS_BRANCH=${MIDONET_OS_BRANCH:-master} -MIDONET_OS_DIR=$MIDONET_DIR/midonet-openstack +MIDONET_OS_DIR=${MIDONET_OS_DIR:-$MIDONET_DIR/midonet-openstack} MIDONET_SETUP_SCRIPT=${MIDONET_SETUP_SCRIPT:-$MIDONET_OS_DIR/bin/setup_midonet_topology.py} - 
-MIDOLMAN_LOG=${MIDOLMAN_LOG:-/var/log/midolman/midolman.log} -MIDONET_API_LOG=${MIDONET_API_LOG:-/var/log/tomcat7/midonet-api.log} - # Save trace setting MY_XTRACE=$(set +o | grep xtrace) set +o xtrace @@ -37,13 +35,11 @@ function configure_midonet() { function init_midonet() { # Initialize DB. Evaluate the output of setup_midonet_topology.py to set - # env variables for provider router ID and metadata router ID - eval `python $MIDONET_SETUP_SCRIPT admin $ADMIN_PASSWORD $ADMIN_TENANT provider_devices` + # env variables for provider router ID. + eval `python $MIDONET_SETUP_SCRIPT $MIDONET_API_URL admin $ADMIN_PASSWORD admin provider_devices` die_if_not_set $LINENO provider_router_id "Error running midonet setup script, provider_router_id was not set." - die_if_not_set $LINENO metadata_router_id "Error running midonet setup script, metadata_router_id was not set." iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $provider_router_id - iniset /$Q_PLUGIN_CONF_FILE MIDONET metadata_router_id $metadata_router_id } function install_midonet() { From 061d52507d4f4e597b825e7e7fb0d9d1858e08db Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Mon, 9 Sep 2013 08:52:19 +0000 Subject: [PATCH 0075/4438] Augment instead of override extra ML2 options. The existing ML2 code overwrote extra options set in localrc with defualt values in some cases. This fixes it so it no longer does that and instead adds to rather than overrides those values. 
Fixes bug 1222854 Change-Id: Iafdaad7d4253f1b61e8a214c50adaf7599a641f2 --- lib/neutron_plugins/ml2 | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 00bd716309..4d4340b614 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -10,9 +10,9 @@ set +o xtrace Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-} # This has to be set here since the agent will set this in the config file if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then - Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_types=$Q_ML2_TENANT_NETWORK_TYPE) + Q_AGENT_EXTRA_AGENT_OPTS+=(tunnel_types=$Q_ML2_TENANT_NETWORK_TYPE) elif [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then - Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_types=gre) + Q_AGENT_EXTRA_AGENT_OPTS+=(tunnel_types=gre) fi # Default openvswitch L2 agent @@ -50,14 +50,14 @@ function neutron_plugin_configure_common() { function neutron_plugin_configure_service() { if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then - Q_SRV_EXTRA_OPTS=(tenant_network_types=$Q_ML2_TENANT_NETWORK_TYPE) + Q_SRV_EXTRA_OPTS+=(tenant_network_types=$Q_ML2_TENANT_NETWORK_TYPE) elif [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then # This assumes you want a simple configuration, and will overwrite # Q_SRV_EXTRA_OPTS if set in addition to ENABLE_TENANT_TUNNELS. - Q_SRV_EXTRA_OPTS=(tenant_network_types=gre) + Q_SRV_EXTRA_OPTS+=(tenant_network_types=gre) Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=(tunnel_id_ranges=$TENANT_TUNNEL_RANGES) elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then - Q_SRV_EXTRA_OPTS=(tenant_network_types=vlan) + Q_SRV_EXTRA_OPTS+=(tenant_network_types=vlan) else echo "WARNING - The ml2 plugin is using local tenant networks, with no connectivity between hosts." 
fi From 46d1ba6ef02b52de47897b78ccf9a29d022a0c17 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Mon, 9 Sep 2013 14:31:37 +0200 Subject: [PATCH 0076/4438] Install schema-image.json Otherwise a warning is logged during startup Change-Id: I958ab8bb7bce474d3e6854b43bb4709986fb61d4 Fixes: LP Bug#1222797 --- lib/glance | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/glance b/lib/glance index 64d8b0695a..7e6968200f 100644 --- a/lib/glance +++ b/lib/glance @@ -39,6 +39,7 @@ GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json +GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json # Support entry points installation of console scripts if [[ -d $GLANCE_DIR/bin ]]; then @@ -142,6 +143,7 @@ function configure_glance() { iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON + cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON } # create_glance_cache_dir() - Part of the init_glance() process From c33d1f986ed4c0ed8a944ada3030c1de51892290 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Fri, 6 Sep 2013 18:14:51 +0100 Subject: [PATCH 0077/4438] Wrong arguments for die() call die() needs $LINENO as its first arg Change-Id: I7c8043dbeb55ec9ed566e7055a02c0a2993d0a8a --- lib/baremetal | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index 8f6c3f1660..0eb852887f 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -291,7 +291,7 @@ function extract_and_upload_k_and_r_from_image() { out=$($BM_IMAGE_BUILD_DIR/bin/disk-image-get-kernel \ -x -d $TOP_DIR/files -o bm-deploy -i $file) if [ $? 
-ne 0 ]; then - die "Failed to get kernel and ramdisk from $file" + die $LINENO "Failed to get kernel and ramdisk from $file" fi XTRACE=$(set +o | grep xtrace) set +o xtrace @@ -439,9 +439,9 @@ function add_baremetal_node() { "$BM_FLAVOR_ROOT_DISK" \ "$mac_1" \ | grep ' id ' | get_field 2 ) - [ $? -eq 0 ] || [ "$id" ] || die "Error adding baremetal node" + [ $? -eq 0 ] || [ "$id" ] || die $LINENO "Error adding baremetal node" id2=$(nova baremetal-interface-add "$id" "$mac_2" ) - [ $? -eq 0 ] || [ "$id2" ] || die "Error adding interface to barmetal node $id" + [ $? -eq 0 ] || [ "$id2" ] || die $LINENO "Error adding interface to barmetal node $id" } From 748fe3d5e33337555b0ae16ef1d3b3ed02ad80f2 Mon Sep 17 00:00:00 2001 From: Bob Melander Date: Thu, 31 Jan 2013 17:12:56 +0100 Subject: [PATCH 0078/4438] Changes to make Devstack work with Neutron L3 plugin patch. Implements bp/quantum-l3-plugin-support Change-Id: I0c56661685fb641efe34fee1390d7d4f37f84494 --- lib/neutron_plugins/ml2 | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 00bd716309..035d6cc81d 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -46,6 +46,15 @@ function neutron_plugin_configure_common() { Q_PLUGIN_CONF_FILENAME=ml2_conf.ini Q_DB_NAME="neutron_ml2" Q_PLUGIN_CLASS="neutron.plugins.ml2.plugin.Ml2Plugin" + # The ML2 plugin delegates L3 routing/NAT functionality to + # the L3 service plugin which must therefore be specified. 
+ Q_L3_PLUGIN_CLASS=${Q_L3_PLUGIN_CLASS:-"neutron.services.l3_router.l3_router_plugin.L3RouterPlugin"} + if ini_has_option $NEUTRON_CONF DEFAULT service_plugins ; then + srv_plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins)","$Q_L3_PLUGIN_CLASS + else + srv_plugins=$Q_L3_PLUGIN_CLASS + fi + iniset $NEUTRON_CONF DEFAULT service_plugins $srv_plugins } function neutron_plugin_configure_service() { From 54d1faecc56e8008717b02e3c92b5abf628024db Mon Sep 17 00:00:00 2001 From: Clint Byrum Date: Mon, 9 Sep 2013 11:54:28 -0700 Subject: [PATCH 0079/4438] Lower Heat max_template_size for Tempest The default size leaves a very large padding for users to create larger templates, but for testing and development a 10kB template is plenty. This value is specifically meant to mirror upcoming changes to tempest so that they are unified and can be tested in lock-step. Change-Id: I0ea9798018a6d864ac04429c3ac89fb374583fb6 --- lib/heat | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/heat b/lib/heat index 58505ab792..ef134ec0f4 100644 --- a/lib/heat +++ b/lib/heat @@ -121,6 +121,9 @@ function configure_heat() { iniset $HEAT_CONF heat_api_cloudwatch bind_host $HEAT_API_CW_HOST iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT + # Set limits to match tempest defaults + iniset $HEAT_CONF max_template_size 10240 + # heat environment sudo mkdir -p $HEAT_ENV_DIR sudo chown $STACK_USER $HEAT_ENV_DIR From f208aafa35996c98de40c1388bbebf326ab2ed20 Mon Sep 17 00:00:00 2001 From: Cyril Roelandt Date: Thu, 5 Sep 2013 09:20:15 +0000 Subject: [PATCH 0080/4438] Swift: configure Ceilometer when it is enabled This allows the storage.objects.{incoming,outgoing}.bytes measurements to be easily used. 
Closes-Bug: #1221097 Change-Id: If988a85930d7df1e043997763c3b5ebd720d6d86 --- lib/swift | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/swift b/lib/swift index f72beafef7..742be67a82 100644 --- a/lib/swift +++ b/lib/swift @@ -61,6 +61,10 @@ SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} # Default is ``staticweb, tempurl, formpost`` SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb} +# Set ``SWIFT_EXTRAS_MIDDLEWARE_LAST`` to extras middlewares that need to be at +# the end of the pipeline. +SWIFT_EXTRAS_MIDDLEWARE_LAST=${SWIFT_EXTRAS_MIDDLEWARE_LAST} + # The ring uses a configurable number of bits from a path’s MD5 hash as # a partition index that designates a device. The number of bits kept # from the hash is known as the partition power, and 2 to the partition @@ -252,6 +256,12 @@ function configure_swift() { iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080} + # Configure Ceilometer + if is_service_enabled ceilometer; then + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift" + SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer" + fi + # By default Swift will be installed with keystone and tempauth middleware # and add the swift3 middleware if its configured for it. 
The token for # tempauth would be prefixed with the reseller_prefix setting TEMPAUTH_ the @@ -261,6 +271,7 @@ function configure_swift() { fi swift_pipeline+=" authtoken keystoneauth tempauth " sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER} + sed -i "/^pipeline/ { s/proxy-server/${SWIFT_EXTRAS_MIDDLEWARE_LAST} proxy-server/ ; }" ${SWIFT_CONFIG_PROXY_SERVER} iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true From eaa9e1e3e0af74ac66cd934bde6762a63d14d1a8 Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Tue, 10 Sep 2013 05:22:37 +0900 Subject: [PATCH 0081/4438] Configure VPNaaS Horizon panel if q-vpn is enabled Change-Id: I062fd31cb1de50f356c2c549a783d9c597b129fa Closes-Bug: #1223012 --- lib/horizon | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/horizon b/lib/horizon index f6bb9f55e0..e55bc152f6 100644 --- a/lib/horizon +++ b/lib/horizon @@ -101,6 +101,11 @@ function init_horizon() { _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_firewall True fi + # enable VPN dashboard in case service is enabled + if is_service_enabled q-vpn; then + _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_vpn True + fi + # Initialize the horizon database (it stores sessions and notices shown to # users). The user system is external (keystone). 
cd $HORIZON_DIR From 3632ab1b66e4928ed0b9ef6ef65392c0e5531a66 Mon Sep 17 00:00:00 2001 From: Giulio Fidente Date: Tue, 10 Sep 2013 02:51:26 +0200 Subject: [PATCH 0082/4438] enable volume backup tests if c-bak is enabled this will set to True the tempest volume/volume_backup_enabled option if c-bak is in ENABLED_SERVICES Change-Id: I69931d668411fc8144d0fdb2f58ad9b6e987f793 --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index 50289b60d4..e48ccf2062 100644 --- a/lib/tempest +++ b/lib/tempest @@ -286,6 +286,9 @@ function configure_tempest() { iniset $TEMPEST_CONF scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} # Volume + if is_service_enabled c-bak; then + iniset $TEMPEST_CONF volume volume_backup_enabled "True" + fi CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) if [ $CINDER_MULTI_LVM_BACKEND == "True" ]; then iniset $TEMPEST_CONF volume multi_backend_enabled "True" From 5c3b861c1963ca6fee9048ed88873c4efea64b8c Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Tue, 10 Sep 2013 05:20:07 -0700 Subject: [PATCH 0083/4438] VMware: remove invalid configuration variable The configuration variable vmware_cluster_name is not used by the cinder plugin. 
Change-Id: I8c0ed58d1dcd66b6f8ea3325007bf5135216933c --- lib/cinder | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index 324db9de27..7f1544b444 100644 --- a/lib/cinder +++ b/lib/cinder @@ -293,7 +293,6 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT vmware_host_ip "$VMWAREAPI_IP" iniset $CINDER_CONF DEFAULT vmware_host_username "$VMWAREAPI_USER" iniset $CINDER_CONF DEFAULT vmware_host_password "$VMWAREAPI_PASSWORD" - iniset $CINDER_CONF DEFAULT vmware_cluster_name "$VMWAREAPI_CLUSTER" iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver" fi From f68c9d3fb77dab0a3ba4a63cd20e3f4bfac11c2b Mon Sep 17 00:00:00 2001 From: Clint Byrum Date: Tue, 10 Sep 2013 11:37:47 -0700 Subject: [PATCH 0084/4438] Fix section on iniset for max_template_size The section was not specified, leading to trying to create a section of max_template_size. Change-Id: Ie3b525030efa780e9cef2d3108be92169d400857 --- lib/heat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index ef134ec0f4..afa0eeb765 100644 --- a/lib/heat +++ b/lib/heat @@ -122,7 +122,7 @@ function configure_heat() { iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT # Set limits to match tempest defaults - iniset $HEAT_CONF max_template_size 10240 + iniset $HEAT_CONF DEFAULT max_template_size 10240 # heat environment sudo mkdir -p $HEAT_ENV_DIR From e700267e33d748fe36c621cf16497597fcbe6aac Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Thu, 5 Sep 2013 08:10:07 -0400 Subject: [PATCH 0085/4438] Only run chmod/chown for local files When the /opt/stack directory is NFS mounted, chown to the local user might not work. Create safe_chown and safe_chmod functions that do nothing on NFS filesystems to avoid spurious errors.
Change-Id: Iaa68879e867a4426b1990d4d46164769177dc7cc --- functions | 52 ++++++++++++++++++++++++++++++++++++++++++++++------ stack.sh | 10 +++++----- 2 files changed, 51 insertions(+), 11 deletions(-) diff --git a/functions b/functions index df8166a0e2..0634fac716 100644 --- a/functions +++ b/functions @@ -1158,6 +1158,51 @@ function service_check() { fi } +# Returns true if the directory is on a filesystem mounted via NFS. +function is_nfs_directory() { + local mount_type=`stat -f -L -c %T $1` + test "$mount_type" == "nfs" +} + +# Only run the command if the target file (the last arg) is not on an +# NFS filesystem. +function _safe_permission_operation() { + local args=( $@ ) + local last + local sudo_cmd + local dir_to_check + + let last="${#args[*]} - 1" + + dir_to_check=${args[$last]} + if [ ! -d "$dir_to_check" ]; then + dir_to_check=`dirname "$dir_to_check"` + fi + + if is_nfs_directory "$dir_to_check" ; then + return 0 + fi + + if [[ $TRACK_DEPENDS = True ]]; then + sudo_cmd="env" + else + sudo_cmd="sudo" + fi + + $sudo_cmd $@ +} + +# Only change ownership of a file or directory if it is not on an NFS +# filesystem. +function safe_chown() { + _safe_permission_operation chown $@ +} + +# Only change permissions of a file or directory if it is not on an +# NFS filesystem. 
+function safe_chmod() { + _safe_permission_operation chmod $@ +} # ``pip install -e`` the package, which processes the dependencies # using pip before running `setup.py develop` @@ -1165,11 +1210,6 @@ function service_check() { # setup_develop directory function setup_develop() { local project_dir=$1 - if [[ $TRACK_DEPENDS = True ]]; then - SUDO_CMD="env" - else - SUDO_CMD="sudo" - fi echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" @@ -1181,7 +1221,7 @@ function setup_develop() { pip_install -e $project_dir # ensure that further actions can do things like setup.py sdist - $SUDO_CMD chown -R $STACK_USER $1/*.egg-info + safe_chown -R $STACK_USER $1/*.egg-info } diff --git a/stack.sh b/stack.sh index 89e4c248c4..975194b846 100755 --- a/stack.sh +++ b/stack.sh @@ -203,7 +203,7 @@ if [[ $EUID -eq 0 ]]; then echo "Copying files to $STACK_USER user" STACK_DIR="$DEST/${TOP_DIR##*/}" cp -r -f -T "$TOP_DIR" "$STACK_DIR" - chown -R $STACK_USER "$STACK_DIR" + safe_chown -R $STACK_USER "$STACK_DIR" cd "$STACK_DIR" if [[ "$SHELL_AFTER_RUN" != "no" ]]; then exec sudo -u $STACK_USER bash -l -c "set -e; bash stack.sh; bash" @@ -236,8 +236,8 @@ fi # Create the destination directory and ensure it is writable by the user # and read/executable by everybody for daemons (e.g. 
apache run for horizon) sudo mkdir -p $DEST -sudo chown -R $STACK_USER $DEST -chmod 0755 $DEST +safe_chown -R $STACK_USER $DEST +safe_chmod 0755 $DEST # a basic test for $DEST path permissions (fatal on error unless skipped) check_path_perm_sanity ${DEST} @@ -258,7 +258,7 @@ ENABLE_DEBUG_LOG_LEVEL=`trueorfalse True $ENABLE_DEBUG_LOG_LEVEL` # Destination path for service data DATA_DIR=${DATA_DIR:-${DEST}/data} sudo mkdir -p $DATA_DIR -sudo chown -R $STACK_USER $DATA_DIR +safe_chown -R $STACK_USER $DATA_DIR # Common Configuration @@ -954,7 +954,7 @@ if is_service_enabled n-net q-dhcp; then clean_iptables rm -rf ${NOVA_STATE_PATH}/networks sudo mkdir -p ${NOVA_STATE_PATH}/networks - sudo chown -R ${USER} ${NOVA_STATE_PATH}/networks + safe_chown -R ${USER} ${NOVA_STATE_PATH}/networks # Force IP forwarding on, just in case sudo sysctl -w net.ipv4.ip_forward=1 fi From 6650fda680310e71b5dda7764bf4033f670d90f0 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Tue, 10 Sep 2013 16:39:18 -0700 Subject: [PATCH 0086/4438] Revert "Swift: configure Ceilometer when it is enabled" This reverts commit f208aafa35996c98de40c1388bbebf326ab2ed20. This commit broke swift functional tests because the ceilometer middleware changes HTTP 404 responses into zero byte responses. This results in BadStatusLine exceptions. Back out the use of ceilometer middleware until it can be fixed. Change-Id: Ie25269b58334c40dc1ecae985326af1cf29c3af4 --- lib/swift | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/lib/swift b/lib/swift index 742be67a82..f72beafef7 100644 --- a/lib/swift +++ b/lib/swift @@ -61,10 +61,6 @@ SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} # Default is ``staticweb, tempurl, formpost`` SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb} -# Set ``SWIFT_EXTRAS_MIDDLEWARE_LAST`` to extras middlewares that need to be at -# the end of the pipeline. 
-SWIFT_EXTRAS_MIDDLEWARE_LAST=${SWIFT_EXTRAS_MIDDLEWARE_LAST} - # The ring uses a configurable number of bits from a path’s MD5 hash as # a partition index that designates a device. The number of bits kept # from the hash is known as the partition power, and 2 to the partition @@ -256,12 +252,6 @@ function configure_swift() { iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080} - # Configure Ceilometer - if is_service_enabled ceilometer; then - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift" - SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer" - fi - # By default Swift will be installed with keystone and tempauth middleware # and add the swift3 middleware if its configured for it. The token for # tempauth would be prefixed with the reseller_prefix setting TEMPAUTH_ the @@ -271,7 +261,6 @@ function configure_swift() { fi swift_pipeline+=" authtoken keystoneauth tempauth " sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER} - sed -i "/^pipeline/ { s/proxy-server/${SWIFT_EXTRAS_MIDDLEWARE_LAST} proxy-server/ ; }" ${SWIFT_CONFIG_PROXY_SERVER} iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true From cd77058ee0e974bd9bd7acaf8426ef24ea9a7a52 Mon Sep 17 00:00:00 2001 From: Alex Rudenko Date: Sun, 1 Sep 2013 16:26:03 +0200 Subject: [PATCH 0087/4438] blueprint devstack-support-for-keystone-mixbackend Added KEYSTONE_ASSIGNMENT_BACKEND to support mixed backend Modified code for KEYSTONE_IDENTITY_BACKEND according to comments. Implemented the check for variables instead of case statements. UPD: Removed arrays. 
UPD2: fixed spacing issues Change-Id: Ie92eed1fb5be5f875ef6633ede9c9e08daf6bf4f Implements: devstack-support-for-keystone-mixbackend --- lib/keystone | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) mode change 100644 => 100755 lib/keystone diff --git a/lib/keystone b/lib/keystone old mode 100644 new mode 100755 index 535710f52b..3642904e1c --- a/lib/keystone +++ b/lib/keystone @@ -44,6 +44,12 @@ KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates # Select the backend for Tokens KEYSTONE_TOKEN_BACKEND=${KEYSTONE_TOKEN_BACKEND:-sql} +# Select the backend for Identity +KEYSTONE_IDENTITY_BACKEND=${KEYSTONE_IDENTITY_BACKEND:-sql} + +# Select the backend for Assignment +KEYSTONE_ASSIGNMENT_BACKEND=${KEYSTONE_ASSIGNMENT_BACKEND:-sql} + # Select Keystone's token format # Choose from 'UUID' and 'PKI' KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-PKI} @@ -63,10 +69,14 @@ KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} # Set the tenant for service accounts in Keystone SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} +# valid identity backends as per dir keystone/identity/backends +KEYSTONE_VALID_IDENTITY_BACKENDS=kvs,ldap,pam,sql + +# valid assignment backends as per dir keystone/identity/backends +KEYSTONE_VALID_ASSIGNMENT_BACKENDS=kvs,ldap,sql # Functions # --------- - # cleanup_keystone() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_keystone() { @@ -116,8 +126,14 @@ function configure_keystone() { iniset $KEYSTONE_CONF DEFAULT member_role_name "_member_" fi - if [[ "$KEYSTONE_IDENTITY_BACKEND" == "ldap" ]]; then - iniset $KEYSTONE_CONF identity driver "keystone.identity.backends.ldap.Identity" + # check if identity backend is valid + if [[ "$KEYSTONE_VALID_IDENTITY_BACKENDS" =~ "$KEYSTONE_IDENTITY_BACKEND" ]]; then + iniset $KEYSTONE_CONF identity driver "keystone.identity.backends.$KEYSTONE_IDENTITY_BACKEND.Identity" + fi + 
+ # check if assignment backend is valid + if [[ "$KEYSTONE_VALID_ASSIGNMENT_BACKENDS" =~ "$KEYSTONE_ASSIGNMENT_BACKEND" ]]; then + iniset $KEYSTONE_CONF assignment driver "keystone.assignment.backends.$KEYSTONE_ASSIGNMENT_BACKEND.Assignment" fi # Set the URL advertised in the ``versions`` structure returned by the '/' route From d02287e5e6f2b356beff8f485e0dd2f7c3beab5f Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 11 Sep 2013 14:08:59 -0400 Subject: [PATCH 0088/4438] import lib/oslo so we can clean it up we were calling cleanup_oslo, however we weren't importing lib/oslo, so that was just throwing an error message and moving on. Let's stop doing that and actually clean up oslo. Change-Id: I48340a8b3d5b50477fb5a7e2ce0bed27deb3ec01 --- clean.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/clean.sh b/clean.sh index a443ac82d0..6ceb5a4933 100755 --- a/clean.sh +++ b/clean.sh @@ -33,6 +33,7 @@ GetDistro source $TOP_DIR/lib/database source $TOP_DIR/lib/rpc_backend +source $TOP_DIR/lib/oslo source $TOP_DIR/lib/tls source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone From 0cccad43aad08487ad1712c63afed602889aaf72 Mon Sep 17 00:00:00 2001 From: Nikhil Manchanda Date: Mon, 3 Dec 2012 18:15:09 -0700 Subject: [PATCH 0089/4438] Added Trove (Database as a Service). - Added changes to stackrc for the Trove Repos. 
- Added support to devstack for "install", "configure", "init", and "run" implements blueprint:trove-devstack-integration Change-Id: Ib3f6daad33e629f764a174b80762c808ce8588e2 --- exercises/trove.sh | 45 +++++++++++ files/apts/trove | 1 + files/rpms-suse/trove | 1 + files/rpms/trove | 1 + functions | 2 + lib/trove | 170 ++++++++++++++++++++++++++++++++++++++++++ stack.sh | 27 ++++++- stackrc | 7 ++ unstack.sh | 5 ++ 9 files changed, 257 insertions(+), 2 deletions(-) create mode 100755 exercises/trove.sh create mode 100644 files/apts/trove create mode 100644 files/rpms-suse/trove create mode 100644 files/rpms/trove create mode 100644 lib/trove diff --git a/exercises/trove.sh b/exercises/trove.sh new file mode 100755 index 0000000000..d48d5fec99 --- /dev/null +++ b/exercises/trove.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +# **trove.sh** + +# Sanity check that trove started if enabled + +echo "*********************************************************************" +echo "Begin DevStack Exercise: $0" +echo "*********************************************************************" + +# This script exits on an error so that errors don't compound and you see +# only the first error that occurred. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. +set -o xtrace + + +# Settings +# ======== + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import exercise configuration +source $TOP_DIR/exerciserc + +is_service_enabled trove || exit 55 + +# can we get a list versions +curl http://$SERVICE_HOST:8779/ 2>/dev/null | grep -q 'versions' || die $LINENO "Trove API not functioning!" 
+ +set +o xtrace +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" + diff --git a/files/apts/trove b/files/apts/trove new file mode 100644 index 0000000000..09dcee8104 --- /dev/null +++ b/files/apts/trove @@ -0,0 +1 @@ +libxslt1-dev # testonly diff --git a/files/rpms-suse/trove b/files/rpms-suse/trove new file mode 100644 index 0000000000..09dcee8104 --- /dev/null +++ b/files/rpms-suse/trove @@ -0,0 +1 @@ +libxslt1-dev # testonly diff --git a/files/rpms/trove b/files/rpms/trove new file mode 100644 index 0000000000..09dcee8104 --- /dev/null +++ b/files/rpms/trove @@ -0,0 +1 @@ +libxslt1-dev # testonly diff --git a/functions b/functions index f24cc89e82..54a72aefce 100644 --- a/functions +++ b/functions @@ -779,6 +779,7 @@ function is_running() { # **glance** returns true if any service enabled start with **g-** # **neutron** returns true if any service enabled start with **q-** # **swift** returns true if any service enabled start with **s-** +# **trove** returns true if any service enabled start with **tr-** # For backward compatibility if we have **swift** in ENABLED_SERVICES all the # **s-** services will be enabled. This will be deprecated in the future. 
# @@ -798,6 +799,7 @@ function is_service_enabled() { [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0 [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 + [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0 [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0 [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0 done diff --git a/lib/trove b/lib/trove new file mode 100644 index 0000000000..e64ca5f6ac --- /dev/null +++ b/lib/trove @@ -0,0 +1,170 @@ +# lib/trove +# Functions to control the configuration and operation of the **Trove** service + +# Dependencies: +# ``functions`` file +# ``DEST``, ``STACK_USER`` must be defined +# ``SERVICE_{HOST|PROTOCOL|TOKEN}`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# install_trove +# configure_trove +# init_trove +# start_trove +# stop_trove +# cleanup_trove + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# -------- + +NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} + +# Set up default configuration +TROVE_DIR=$DEST/trove +TROVECLIENT_DIR=$DEST/python-troveclient +TROVE_CONF_DIR=/etc/trove +TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove +TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT//v$IDENTITY_API_VERSION +TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove} +TROVE_BIN_DIR=/usr/local/bin + +# create_trove_accounts() - Set up common required trove accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service trove admin # if enabled + +create_trove_accounts() { + # Trove + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + SERVICE_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + if [[ "$ENABLED_SERVICES" =~ "trove" ]]; then + 
TROVE_USER=$(keystone user-create --name=trove \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=trove@example.com \ + | grep " id " | get_field 2) + keystone user-role-add --tenant-id $SERVICE_TENANT \ + --user-id $TROVE_USER \ + --role-id $SERVICE_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + TROVE_SERVICE=$(keystone service-create \ + --name=trove \ + --type=database \ + --description="Trove Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $TROVE_SERVICE \ + --publicurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \ + --adminurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \ + --internalurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" + fi + fi +} + +# stack.sh entry points +# --------------------- + +# cleanup_trove() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_trove() { + #Clean up dirs + rm -fr $TROVE_AUTH_CACHE_DIR/* + rm -fr $TROVE_CONF_DIR/* +} + +# configure_troveclient() - Set config files, create data dirs, etc +function configure_troveclient() { + setup_develop $TROVECLIENT_DIR +} + +# configure_trove() - Set config files, create data dirs, etc +function configure_trove() { + setup_develop $TROVE_DIR + + # Create the trove conf dir and cache dirs if they don't exist + sudo mkdir -p ${TROVE_CONF_DIR} + sudo mkdir -p ${TROVE_AUTH_CACHE_DIR} + sudo chown -R $STACK_USER: ${TROVE_CONF_DIR} + sudo chown -R $STACK_USER: ${TROVE_AUTH_CACHE_DIR} + + # Copy api-paste file over to the trove conf dir and configure it + cp $TROVE_LOCAL_CONF_DIR/api-paste.ini $TROVE_CONF_DIR/api-paste.ini + TROVE_API_PASTE_INI=$TROVE_CONF_DIR/api-paste.ini + iniset $TROVE_API_PASTE_INI filter:tokenauth auth_host $KEYSTONE_AUTH_HOST + iniset $TROVE_API_PASTE_INI filter:tokenauth auth_port $KEYSTONE_AUTH_PORT + iniset $TROVE_API_PASTE_INI filter:tokenauth auth_protocol $KEYSTONE_AUTH_PROTOCOL + 
iniset $TROVE_API_PASTE_INI filter:tokenauth admin_tenant_name $SERVICE_TENANT_NAME + iniset $TROVE_API_PASTE_INI filter:tokenauth admin_user trove + iniset $TROVE_API_PASTE_INI filter:tokenauth admin_password $SERVICE_PASSWORD + iniset $TROVE_API_PASTE_INI filter:tokenauth signing_dir $TROVE_AUTH_CACHE_DIR + + # (Re)create trove conf files + rm -f $TROVE_CONF_DIR/trove.conf + rm -f $TROVE_CONF_DIR/trove-taskmanager.conf + iniset $TROVE_CONF_DIR/trove.conf DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $TROVE_CONF_DIR/trove.conf DEFAULT sql_connection `database_connection_url trove` + iniset $TROVE_CONF_DIR/trove.conf DEFAULT add_addresses True + + iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT sql_connection `database_connection_url trove` + sed -i "s/localhost/$NETWORK_GATEWAY/g" $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample + + # (Re)create trove taskmanager conf file if needed + if is_service_enabled tr-tmgr; then + iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT sql_connection `database_connection_url trove` + iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT taskmanager_manager trove.taskmanager.manager.Manager + iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_user radmin + iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_tenant_name trove + iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS + iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT + fi +} + +# install_troveclient() - Collect source and prepare +function install_troveclient() { + git_clone $TROVECLIENT_REPO $TROVECLIENT_DIR $TROVECLIENT_BRANCH +} + +# install_trove() - Collect source and prepare +function install_trove() { + git_clone $TROVE_REPO 
$TROVE_DIR $TROVE_BRANCH +} + +# init_trove() - Initializes Trove Database as a Service +function init_trove() { + #(Re)Create trove db + recreate_database trove utf8 + + #Initialize the trove database + $TROVE_DIR/bin/trove-manage db_sync +} + +# start_trove() - Start running processes, including screen +function start_trove() { + screen_it tr-api "cd $TROVE_DIR; bin/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1" + screen_it tr-tmgr "cd $TROVE_DIR; bin/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1" +} + +# stop_trove() - Stop running processes +function stop_trove() { + # Kill the trove screen windows + for serv in tr-api tr-tmgr; do + screen -S $SCREEN_NAME -p $serv -X kill + done +} + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index 89e4c248c4..be04bedade 100755 --- a/stack.sh +++ b/stack.sh @@ -2,8 +2,8 @@ # ``stack.sh`` is an opinionated OpenStack developer installation. It # installs and configures various combinations of **Ceilometer**, **Cinder**, -# **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron** -# and **Swift**. 
+# **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron**, +# **Swift**, and **Trove** # This script allows you to specify configuration options of what git # repositories to use, enabled services, network configuration and various @@ -319,6 +319,7 @@ source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap source $TOP_DIR/lib/ironic +source $TOP_DIR/lib/trove # Look for Nova hypervisor plugin NOVA_PLUGINS=$TOP_DIR/lib/nova_plugins @@ -720,6 +721,12 @@ if is_service_enabled heat; then configure_heat fi +if is_service_enabled trove; then + install_trove + install_troveclient + cleanup_trove +fi + if is_service_enabled tls-proxy; then configure_CA init_CA @@ -860,6 +867,10 @@ if is_service_enabled key; then create_cinder_accounts create_neutron_accounts + if is_service_enabled trove; then + create_trove_accounts + fi + if is_service_enabled swift || is_service_enabled s-proxy; then create_swift_accounts fi @@ -1236,6 +1247,18 @@ if is_service_enabled heat; then start_heat fi +# Configure and launch the trove service api, and taskmanager +if is_service_enabled trove; then + # Initialize trove + echo_summary "Configuring Trove" + configure_troveclient + configure_trove + init_trove + + # Start the trove API and trove taskmgr components + echo_summary "Starting Trove" + start_trove +fi # Create account rc files # ======================= diff --git a/stackrc b/stackrc index f9a977c432..3a338d16f2 100644 --- a/stackrc +++ b/stackrc @@ -181,6 +181,13 @@ RYU_BRANCH=${RYU_BRANCH:-master} SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git} SPICE_BRANCH=${SPICE_BRANCH:-master} +# trove service +TROVE_REPO=${TROVE_REPO:-${GIT_BASE}/openstack/trove.git} +TROVE_BRANCH=${TROVE_BRANCH:-master} + +# trove client library test +TROVECLIENT_REPO=${TROVECLIENT_REPO:-${GIT_BASE}/openstack/python-troveclient.git} +TROVECLIENT_BRANCH=${TROVECLIENT_BRANCH:-master} # Nova hypervisor configuration. 
We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can diff --git a/unstack.sh b/unstack.sh index 38f795b09b..05d9fb7c83 100755 --- a/unstack.sh +++ b/unstack.sh @@ -34,6 +34,7 @@ source $TOP_DIR/lib/horizon source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron source $TOP_DIR/lib/ironic +source $TOP_DIR/lib/trove # Determine what system we are running on. This provides ``os_VENDOR``, # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` @@ -130,4 +131,8 @@ if is_service_enabled neutron; then cleanup_neutron fi +if is_service_enabled trove; then + cleanup_trove +fi + cleanup_tmp From d187bd95368c926af317723b3bc563ea2cae61bb Mon Sep 17 00:00:00 2001 From: Zhi Kun Liu Date: Wed, 11 Sep 2013 14:51:18 +0800 Subject: [PATCH 0090/4438] remove whitebox configuration in tempest.conf sync up with removing whitebox tests in tempest This commit depends on https://review.openstack.org/#/c/46116/ Change-Id: I410583187284c2951d872f6e9465f741decc60bd --- lib/tempest | 8 -------- 1 file changed, 8 deletions(-) diff --git a/lib/tempest b/lib/tempest index e48ccf2062..bc0b18d9f4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -249,14 +249,6 @@ function configure_tempest() { iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} - # Whitebox - iniset $TEMPEST_CONF whitebox source_dir $NOVA_SOURCE_DIR - iniset $TEMPEST_CONF whitebox bin_dir $NOVA_BIN_DIR - # TODO(jaypipes): Create the key file here... right now, no whitebox - # tests actually use a key. 
- iniset $TEMPEST_CONF whitebox path_to_private_key $TEMPEST_DIR/id_rsa - iniset $TEMPEST_CONF whitebox db_uri $BASE_SQL_CONN/nova - # Compute admin iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED From 05ae833be298d1b8fa85cfbb9ef57c059baea05e Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Tue, 20 Aug 2013 14:51:08 -0700 Subject: [PATCH 0091/4438] Colorize Neutron log output and refactor log setup code Bug 1214616 This patch adds colors to on-screen Neutron log output in the same way nova, cinder, and heat engine do. To this aim, colorized logging configuration has been moved to ./functions. The reason for this refactoring is that these instruction are the same or very similar for each project, with the only exception of the target configuration file. Change-Id: Idf0d1b842bb9ab046c9ef826de1dfc55b3f1df9d --- functions | 19 +++++++++++++++++++ lib/cinder | 7 ++----- lib/heat | 7 ++----- lib/neutron | 5 +++++ lib/nova | 7 ++----- 5 files changed, 30 insertions(+), 15 deletions(-) diff --git a/functions b/functions index f996ba89ab..566c85c3b9 100644 --- a/functions +++ b/functions @@ -1695,6 +1695,25 @@ function policy_add() { } +# This function sets log formatting options for colorizing log +# output to stdout. It is meant to be called by lib modules. +# The last two parameters are optional and can be used to specify +# non-default value for project and user format variables. 
+# Defaults are respectively 'project_name' and 'user_name' +# +# setup_colorized_logging something.conf SOMESECTION +function setup_colorized_logging() { + local conf_file=$1 + local conf_section=$2 + local project_var=${3:-"project_name"} + local user_var=${4:-"user_name"} + # Add color to logging output + iniset $conf_file $conf_section logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %("$user_var")s %("$project_var")s%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file $conf_section logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file $conf_section logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" + iniset $conf_file $conf_section logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" +} + # Restore xtrace $XTRACE diff --git a/lib/cinder b/lib/cinder index 7f1544b444..bec65ed234 100644 --- a/lib/cinder +++ b/lib/cinder @@ -255,12 +255,9 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT volume_clear none fi + # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - # Add color to logging output - iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s%(color)s] %(instance)s%(color)s%(message)s" - iniset $CINDER_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" - iniset $CINDER_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + setup_colorized_logging $CINDER_CONF DEFAULT "project_id" "user_id" fi if [ "$CINDER_DRIVER" == 
"XenAPINFS" ]; then diff --git a/lib/heat b/lib/heat index afa0eeb765..ac769162db 100644 --- a/lib/heat +++ b/lib/heat @@ -1,4 +1,4 @@ -# lib/heat +etup lib/heat # Install and start **Heat** service # To enable, add the following to localrc @@ -86,10 +86,7 @@ function configure_heat() { iniset $HEAT_CONF DEFAULT use_syslog $SYSLOG if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output - iniset $HEAT_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" - iniset $HEAT_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" - iniset $HEAT_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - iniset $HEAT_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + setup_colorized_logging $HEAT_CONF DEFAULT fi # keystone authtoken diff --git a/lib/neutron b/lib/neutron index 5664ff2cc5..4a3d1b06a6 100644 --- a/lib/neutron +++ b/lib/neutron @@ -534,6 +534,11 @@ function _configure_neutron_common() { iniset $NEUTRON_CONF quotas quota_security_group_rule -1 fi + # Format logging + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + setup_colorized_logging $NEUTRON_CONF DEFAULT + fi + _neutron_setup_rootwrap } diff --git a/lib/nova b/lib/nova index 9b766a9114..568f67d445 100644 --- a/lib/nova +++ b/lib/nova @@ -499,12 +499,9 @@ function create_nova_conf() { if [ "$API_RATE_LIMIT" != "True" ]; then iniset $NOVA_CONF DEFAULT api_rate_limit "False" fi + # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - # Add color to logging output - iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s 
%(project_name)s%(color)s] %(instance)s%(color)s%(message)s" - iniset $NOVA_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" - iniset $NOVA_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - iniset $NOVA_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + setup_colorized_logging $NOVA_CONF DEFAULT else # Show user_name and project_name instead of user_id and project_id iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" From c76c058df21ae9fa0198dfcaad0c0ea4ead8e09f Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 12 Sep 2013 11:42:08 -0700 Subject: [PATCH 0092/4438] Disable ceilometer.compute.nova_notifier driver It appears that the ceilometer nova notification driver is causing nova-compute to hang. The last thing nova-compute logs before hanging is a line from this driver. At the very least the ceilometer nova notification keeps stacktracing. 
Change-Id: Ic375272b751159a64777ca73c1b64515195aacfb Related-Bug: #1221987 --- lib/nova | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/nova b/lib/nova index 9b766a9114..577c260d35 100644 --- a/lib/nova +++ b/lib/nova @@ -513,7 +513,6 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT instance_usage_audit "True" iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour" iniset $NOVA_CONF DEFAULT notify_on_state_change "vm_and_task_state" - iniset_multiline $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier" "ceilometer.compute.nova_notifier" fi # Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS`` From 8f5bf93d069f2ec4b85710fb05378e5d3027be86 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 16 Sep 2013 01:40:13 +0200 Subject: [PATCH 0093/4438] lib/tempest remove whitebox section tempest whitebox tests are removed from the tempest repo, so it's configuration is unnecessary. Change-Id: I6659e2af894014518a486f411ca06179d43bbb8b --- lib/tempest | 8 -------- 1 file changed, 8 deletions(-) diff --git a/lib/tempest b/lib/tempest index e48ccf2062..bc0b18d9f4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -249,14 +249,6 @@ function configure_tempest() { iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} - # Whitebox - iniset $TEMPEST_CONF whitebox source_dir $NOVA_SOURCE_DIR - iniset $TEMPEST_CONF whitebox bin_dir $NOVA_BIN_DIR - # TODO(jaypipes): Create the key file here... right now, no whitebox - # tests actually use a key. 
- iniset $TEMPEST_CONF whitebox path_to_private_key $TEMPEST_DIR/id_rsa - iniset $TEMPEST_CONF whitebox db_uri $BASE_SQL_CONN/nova - # Compute admin iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED From 1e4551db44d6c6d89ab5a595935b310ea0584210 Mon Sep 17 00:00:00 2001 From: Aaron Rosen Date: Mon, 16 Sep 2013 13:58:08 -0700 Subject: [PATCH 0094/4438] use method pip_install over sudo pip install so proxy settings work Change-Id: I2f0c69a72ef73c317b707d99c65cab0fb590d158 --- tools/fixup_stuff.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 371b25fc8f..87922c8ece 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -34,8 +34,8 @@ FILES=$TOP_DIR/files # --------------- # Pre-install affected packages so we can fix the permissions -sudo pip install prettytable -sudo pip install httplib2 +pip_install prettytable +pip_install httplib2 SITE_DIRS=$(python -c "import site; import os; print os.linesep.join(site.getsitepackages())") for dir in $SITE_DIRS; do From d582460147404587fbcd3a39f350109d1a04a74f Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Tue, 17 Sep 2013 11:44:37 +1000 Subject: [PATCH 0095/4438] Allow replacing a user variable in a swift template Prepare for a change in swift templates that will have a %USER% variable. 
Change-Id: I611ae7f82de7f2e6a38ce3de38d0600fa8687bff Partial-Bug: 1226346 --- lib/swift | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/swift b/lib/swift index f72beafef7..ae8ef746f0 100644 --- a/lib/swift +++ b/lib/swift @@ -132,6 +132,7 @@ function _config_swift_apache_wsgi() { s/%PORT%/$proxy_port/g; s/%SERVICENAME%/proxy-server/g; s/%APACHE_NAME%/${APACHE_NAME}/g; + s/%USER%/${STACK_USER}/g; " -i ${apache_vhost_dir}/proxy-server sudo cp ${SWIFT_DIR}/examples/wsgi/proxy-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi @@ -152,6 +153,7 @@ function _config_swift_apache_wsgi() { s/%PORT%/$object_port/g; s/%SERVICENAME%/object-server-${node_number}/g; s/%APACHE_NAME%/${APACHE_NAME}/g; + s/%USER%/${STACK_USER}/g; " -i ${apache_vhost_dir}/object-server-${node_number} ! is_fedora && sudo a2ensite object-server-${node_number} @@ -167,6 +169,7 @@ function _config_swift_apache_wsgi() { s/%PORT%/$container_port/g; s/%SERVICENAME%/container-server-${node_number}/g; s/%APACHE_NAME%/${APACHE_NAME}/g; + s/%USER%/${STACK_USER}/g; " -i ${apache_vhost_dir}/container-server-${node_number} ! is_fedora && sudo a2ensite container-server-${node_number} @@ -182,6 +185,7 @@ function _config_swift_apache_wsgi() { s/%PORT%/$account_port/g; s/%SERVICENAME%/account-server-${node_number}/g; s/%APACHE_NAME%/${APACHE_NAME}/g; + s/%USER%/${STACK_USER}/g; " -i ${apache_vhost_dir}/account-server-${node_number} ! is_fedora && sudo a2ensite account-server-${node_number} From 5a3d7707931186664f32b1232970e3f4f4b7526f Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Tue, 17 Sep 2013 11:44:05 +1000 Subject: [PATCH 0096/4438] Remove change_apache_user_group function This function allowed you to change the process running user for all of apache. This is better handled on a per-process basis. 
Change-Id: I165adc0c49fc328f34835856b49983c4e189f143 Fixes: bug 1226346 Fixes: bug 1225853 --- lib/apache | 24 ------------------------ lib/swift | 7 ------- 2 files changed, 31 deletions(-) diff --git a/lib/apache b/lib/apache index a2b0534d16..d811f87510 100644 --- a/lib/apache +++ b/lib/apache @@ -4,7 +4,6 @@ # Dependencies: # ``functions`` file # is_apache_enabled_service -# change_apache_user_group # install_apache_wsgi # config_apache_wsgi # start_apache_server @@ -52,29 +51,6 @@ function is_apache_enabled_service() { return 1 } -# change_apache_user_group() - Change the User/Group to run Apache server -function change_apache_user_group(){ - local stack_user=$@ - if is_ubuntu; then - sudo sed -e " - s/^export APACHE_RUN_USER=.*/export APACHE_RUN_USER=${stack_user}/g; - s/^export APACHE_RUN_GROUP=.*/export APACHE_RUN_GROUP=${stack_user}/g - " -i /etc/${APACHE_NAME}/envvars - elif is_fedora; then - sudo sed -e " - s/^User .*/User ${stack_user}/g; - s/^Group .*/Group ${stack_user}/g - " -i /etc/${APACHE_NAME}/httpd.conf - elif is_suse; then - sudo sed -e " - s/^User .*/User ${stack_user}/g; - s/^Group .*/Group ${stack_user}/g - " -i /etc/${APACHE_NAME}/uid.conf - else - exit_distro_not_supported "apache user and group" - fi -} - # install_apache_wsgi() - Install Apache server and wsgi module function install_apache_wsgi() { # Apache installation, because we mark it NOPRIME diff --git a/lib/swift b/lib/swift index ae8ef746f0..83f5369cbb 100644 --- a/lib/swift +++ b/lib/swift @@ -197,9 +197,6 @@ function _config_swift_apache_wsgi() { done - # run apache server as stack user - change_apache_user_group ${STACK_USER} - # WSGI isn't enabled by default, enable it ! is_fedora && sudo a2enmod wsgi } @@ -556,10 +553,6 @@ function start_swift() { fi if is_apache_enabled_service swift; then - # Make sure the apache lock dir is owned by $STACK_USER - # for running apache server to avoid failure of restarting - # apache server due to permission problem. 
- sudo chown -R $STACK_USER /var/run/lock/$APACHE_NAME restart_apache_server swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start screen_it s-proxy "cd $SWIFT_DIR && sudo tail -f /var/log/$APACHE_NAME/proxy-server" From 0f4f44315905db86fb0e3f43f9c6cf3b85ea34c1 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Tue, 17 Sep 2013 17:15:25 -0400 Subject: [PATCH 0097/4438] docker: ensure socat is installed install_docker.sh failed for me because socat wasn't installed. Add it to this script since it expects it to be there. Change-Id: Ic55f5e38de1b38bdd37407b7bec533d4c3eff2a9 --- tools/docker/install_docker.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh index d659ad104b..289002e8e7 100755 --- a/tools/docker/install_docker.sh +++ b/tools/docker/install_docker.sh @@ -38,7 +38,7 @@ curl https://get.docker.io/gpg | sudo apt-key add - install_package python-software-properties && \ sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list" apt_get update -install_package --force-yes lxc-docker=${DOCKER_PACKAGE_VERSION} +install_package --force-yes lxc-docker=${DOCKER_PACKAGE_VERSION} socat # Start the daemon - restart just in case the package ever auto-starts... restart_service docker From 3418c1caa5c52fd9989e5829fda0848b4a8dfea7 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 16 Sep 2013 18:35:49 +0200 Subject: [PATCH 0098/4438] Increase default swift storage Swift storage is used as glance image back-end. Tempest have cinder to uploads 1 GiB image from cinder in twice. In parallel execution in cause an issue, bacuse the current default size is 1_000_000 KiB. Increasing the default swit storage size from 1_000_000 KiB 4_000_000 KiB when tempest is enabled. 
Fixing bug 1225664 Change-Id: Iccd6368e4df71abb5ccfe7d361c64d86e1071d35 --- lib/swift | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/swift b/lib/swift index f72beafef7..3dcd8b6eb0 100644 --- a/lib/swift +++ b/lib/swift @@ -55,7 +55,13 @@ fi # swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in # kilobytes. # Default is 1 gigabyte. -SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} +SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=1048576 +# if tempest enabled the default size is 4 Gigabyte. +if is_service_enabled tempest; then + SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=${SWIFT_LOOPBACK_DISK_SIZE:-4194304} +fi + +SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_DEFAULT} # Set ``SWIFT_EXTRAS_MIDDLEWARE`` to extras middlewares. # Default is ``staticweb, tempurl, formpost`` From 1ca490c049d2d4b3882d764c1274a614b1588501 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Thu, 19 Sep 2013 10:03:36 +0100 Subject: [PATCH 0099/4438] xenapi: Use C locale By exporting the LC_ALL=C we can get rid of localisation issues, as the actual scripts are already assuming an english installation. FIxes bug 1227527 Change-Id: Ieeebce4d53b09959146a970f3fb803201ac5ebdf --- tools/xen/install_os_domU.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index b49504d9e9..110bbd998c 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -10,6 +10,8 @@ set -o errexit set -o nounset set -o xtrace +export LC_ALL=C + # Abort if localrc is not set if [ ! -e ../../localrc ]; then echo "You must have a localrc with ALL necessary passwords defined before proceeding." 
From 704106a1bd316d9a0df2f82233817ceeda92e744 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Thu, 12 Sep 2013 14:24:47 +0200 Subject: [PATCH 0100/4438] Fix LDAP support for openSUSE Closes-Bug: #1227651 Change-Id: I1c55fbb2f65f882a0ae2bcf4767c0a3e0f0f47e7 --- files/ldap/base-config.ldif | 19 +++++++++++++++++++ lib/ldap | 18 ++++++++++++++---- 2 files changed, 33 insertions(+), 4 deletions(-) create mode 100644 files/ldap/base-config.ldif diff --git a/files/ldap/base-config.ldif b/files/ldap/base-config.ldif new file mode 100644 index 0000000000..026d8bc0fc --- /dev/null +++ b/files/ldap/base-config.ldif @@ -0,0 +1,19 @@ +dn: cn=config +objectClass: olcGlobal +cn: config +olcArgsFile: /var/run/slapd/slapd.args +olcAuthzRegexp: {0}gidNumber=0\+uidNumber=0,cn=peercred,cn=external,cn=auth dn + :cn=config +olcPidFile: /var/run/slapd/slapd.pid +olcSizeLimit: 10000 + +dn: cn=schema,cn=config +objectClass: olcSchemaConfig +cn: schema + +include: file:///etc/openldap/schema/core.ldif + +dn: olcDatabase={1}hdb,cn=config +objectClass: olcHdbConfig +olcDbDirectory: /var/lib/ldap +olcSuffix: dc=openstack,dc=org diff --git a/lib/ldap b/lib/ldap index 89b31b2c25..2a24ccddf7 100644 --- a/lib/ldap +++ b/lib/ldap @@ -8,6 +8,7 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace +LDAP_SERVICE_NAME=slapd # Functions # --------- @@ -24,10 +25,19 @@ function install_ldap() { LDAP_ROOTPW_COMMAND=replace sudo DEBIAN_FRONTEND=noninteractive apt-get install slapd ldap-utils #automatically starts LDAP on ubuntu so no need to call start_ldap - elif is_fedora || is_suse; then + elif is_fedora; then LDAP_OLCDB_NUMBER=2 LDAP_ROOTPW_COMMAND=add start_ldap + elif is_suse; then + LDAP_OLCDB_NUMBER=1 + LDAP_ROOTPW_COMMAND=add + LDAP_SERVICE_NAME=ldap + # SUSE has slappasswd in /usr/sbin/ + PATH=$PATH:/usr/sbin/ + sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $FILES/ldap/base-config.ldif + sudo sed -i '/^OPENLDAP_START_LDAPI=/s/"no"/"yes"/g' /etc/sysconfig/openldap + start_ldap fi printf 
"generate password file" @@ -42,7 +52,7 @@ function install_ldap() { sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_MGR_DIFF_FILE # On fedora we need to manually add cosine and inetorgperson schemas - if is_fedora; then + if is_fedora || is_suse; then sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/cosine.ldif sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif fi @@ -64,13 +74,13 @@ function install_ldap() { # start_ldap() - Start LDAP function start_ldap() { - sudo service slapd restart + sudo service $LDAP_SERVICE_NAME restart } # stop_ldap() - Stop LDAP function stop_ldap() { - sudo service slapd stop + sudo service $LDAP_SERVICE_NAME stop } # clear_ldap_state() - Clear LDAP State From 7d5621583737fd74119cc30e8216780e1a192291 Mon Sep 17 00:00:00 2001 From: ZhiQiang Fan Date: Fri, 20 Sep 2013 02:20:35 +0800 Subject: [PATCH 0101/4438] Replace OpenStack LLC with OpenStack Foundation Change-Id: I7642e7163b615798867881b012240164465c5e43 Fixes-Bug: #1214176 --- tools/xen/scripts/install-os-vpx.sh | 2 +- tools/xen/scripts/mkxva | 2 +- tools/xen/scripts/uninstall-os-vpx.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index c94a593e3d..7469e0c10b 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -1,7 +1,7 @@ #!/bin/bash # # Copyright (c) 2011 Citrix Systems, Inc. -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/tools/xen/scripts/mkxva b/tools/xen/scripts/mkxva index a316da2ddb..392c05b407 100755 --- a/tools/xen/scripts/mkxva +++ b/tools/xen/scripts/mkxva @@ -1,7 +1,7 @@ #!/bin/bash # # Copyright (c) 2011 Citrix Systems, Inc. -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/tools/xen/scripts/uninstall-os-vpx.sh b/tools/xen/scripts/uninstall-os-vpx.sh index 0feaec79e5..ac260949c4 100755 --- a/tools/xen/scripts/uninstall-os-vpx.sh +++ b/tools/xen/scripts/uninstall-os-vpx.sh @@ -1,7 +1,7 @@ #!/bin/bash # # Copyright (c) 2011 Citrix Systems, Inc. -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may From 072d137766d2a9d933147a9cbb61445674387334 Mon Sep 17 00:00:00 2001 From: AmalaBasha Date: Fri, 20 Sep 2013 16:26:10 +0530 Subject: [PATCH 0102/4438] edit-glance-manage-command-for-recreate-db As per https://bugs.launchpad.net/glance/+bug/1213197, and subsequent review at https://review.openstack.org/#/c/47161/ Glance-manage commands are proposed to be subcommands of 'db'. This would require change to the script to recreate_db which calls the db_sync command. Implements blueprint edit-glance-manage-command-for-recreate-db Change-Id: I9470709ec34896dba7a37fdff4791206bb5ef5ed --- lib/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index 7e6968200f..510692c261 100644 --- a/lib/glance +++ b/lib/glance @@ -171,7 +171,7 @@ function init_glance() { recreate_database glance utf8 # Migrate glance database - $GLANCE_BIN_DIR/glance-manage db_sync + $GLANCE_BIN_DIR/glance-manage db sync create_glance_cache_dir } From 14ea1a2b79aa7a9e7fff284b7d534c0038bbaa89 Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Sun, 22 Sep 2013 03:04:56 +0000 Subject: [PATCH 0103/4438] Correctly set the L3 service plugin for ML2 ML2 uses a service plugin for L3. This patch to devstack correctly sets this by setting or updating the variable Q_SERVICE_PLUGIN_CLASSES, which makes ML2 compatible when running with other service plugins (e.g. LBaaS and VPN). 
Fixes bug 1231622 Change-Id: I0ce1f5a42bd052995135ffac1ee5ef382d69789e --- lib/neutron_plugins/ml2 | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 71a0638670..8d2e303854 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -30,6 +30,9 @@ Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS:-vni_ranges=10 # Default VLAN TypeDriver options Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS:-} +# L3 Plugin to load for ML2 +ML2_L3_PLUGIN=${ML2_L3_PLUGIN:-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin} + function populate_ml2_config() { OPTS=$1 CONF=$2 @@ -48,13 +51,11 @@ function neutron_plugin_configure_common() { Q_PLUGIN_CLASS="neutron.plugins.ml2.plugin.Ml2Plugin" # The ML2 plugin delegates L3 routing/NAT functionality to # the L3 service plugin which must therefore be specified. - Q_L3_PLUGIN_CLASS=${Q_L3_PLUGIN_CLASS:-"neutron.services.l3_router.l3_router_plugin.L3RouterPlugin"} - if ini_has_option $NEUTRON_CONF DEFAULT service_plugins ; then - srv_plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins)","$Q_L3_PLUGIN_CLASS + if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then + Q_SERVICE_PLUGIN_CLASSES=$ML2_L3_PLUGIN else - srv_plugins=$Q_L3_PLUGIN_CLASS + Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$ML2_L3_PLUGIN" fi - iniset $NEUTRON_CONF DEFAULT service_plugins $srv_plugins } function neutron_plugin_configure_service() { From 19eed744225acdb08a35b4c8b7b13df3c0f078b7 Mon Sep 17 00:00:00 2001 From: Eoghan Glynn Date: Fri, 20 Sep 2013 21:11:25 +0000 Subject: [PATCH 0104/4438] Modified ceilometer alarm evaluator console script Take account of the modification to the alarm evaluator console script naming in the following commit: https://github.com/openstack/ceilometer/commit/bad5f18e Change-Id: Ic7fc3b8ad7be9dd2a5b5ed3c07e169691229bb4d --- lib/ceilometer | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/lib/ceilometer b/lib/ceilometer index 2afbc88b36..1b0431906a 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -5,7 +5,7 @@ # enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api # # To ensure Ceilometer alarming services are enabled also, further add to the localrc: -# enable_service ceilometer-alarm-notifier ceilometer-alarm-singleton +# enable_service ceilometer-alarm-notifier ceilometer-alarm-evaluator # Dependencies: # - functions @@ -139,13 +139,13 @@ function start_ceilometer() { screen_it ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF" screen_it ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" screen_it ceilometer-alarm-notifier "ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" - screen_it ceilometer-alarm-singleton "ceilometer-alarm-singleton --config-file $CEILOMETER_CONF" + screen_it ceilometer-alarm-evaluator "ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF" } # stop_ceilometer() - Stop running processes function stop_ceilometer() { # Kill the ceilometer screen windows - for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-singleton; do + for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do screen -S $SCREEN_NAME -p $serv -X kill done } From 835db2feadd1795201abaf4be00efc85ef9f8253 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 23 Sep 2013 14:17:06 -0400 Subject: [PATCH 0105/4438] print out git references for trees in order to be sure we understand the environment that's running in an upstream test, print out the git information for the tree. This will hopefully address questions of "which commit of tempest" is being used for particular tests. 
Change-Id: Ief4e8a17fd75945f02982d2adf8625fe927d823d --- functions | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/functions b/functions index e1a5f4be3a..209f13c7d7 100644 --- a/functions +++ b/functions @@ -548,12 +548,18 @@ function is_arch { # Uses global ``OFFLINE`` # git_clone remote dest-dir branch function git_clone { - [[ "$OFFLINE" = "True" ]] && return - GIT_REMOTE=$1 GIT_DEST=$2 GIT_REF=$3 + if [[ "$OFFLINE" = "True" ]]; then + echo "Running in offline mode, clones already exist" + # print out the results so we know what change was used in the logs + cd $GIT_DEST + git show --oneline + return + fi + if echo $GIT_REF | egrep -q "^refs"; then # If our branch name is a gerrit style refs/changes/... if [[ ! -d $GIT_DEST ]]; then @@ -595,6 +601,10 @@ function git_clone { fi fi + + # print out the results so we know what change was used in the logs + cd $GIT_DEST + git show --oneline } From 9a532b84474f5c6e9e11808bcda9566f20274011 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 23 Sep 2013 13:44:38 -0500 Subject: [PATCH 0106/4438] XenServer hypervisor plugin Convert XenServer hypervisor configuration in Nova to the new plugin setup. 
Change-Id: I8916560ca3f2dae8b8d8bcb60b7aa2eb5984cbcb --- lib/nova | 16 +---- lib/nova_plugins/hypervisor-xenserver | 85 +++++++++++++++++++++++++++ stack.sh | 19 ------ 3 files changed, 87 insertions(+), 33 deletions(-) create mode 100644 lib/nova_plugins/hypervisor-xenserver diff --git a/lib/nova b/lib/nova index 577c260d35..b058bd382e 100644 --- a/lib/nova +++ b/lib/nova @@ -76,15 +76,7 @@ SPICE_DIR=$DEST/spice-html5 # -------------------------- # Set defaults according to the virt driver -if [ "$VIRT_DRIVER" = 'xenserver' ]; then - PUBLIC_INTERFACE_DEFAULT=eth2 - GUEST_INTERFACE_DEFAULT=eth1 - # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args - FLAT_NETWORK_BRIDGE_DEFAULT=$(sed -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline) - if is_service_enabled neutron; then - XEN_INTEGRATION_BRIDGE=$(sed -e 's/.* xen_integration_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline) - fi -elif [ "$VIRT_DRIVER" = 'baremetal' ]; then +if [ "$VIRT_DRIVER" = 'baremetal' ]; then NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager} PUBLIC_INTERFACE_DEFAULT=eth0 FLAT_INTERFACE=${FLAT_INTERFACE:-eth0} @@ -537,16 +529,12 @@ function create_nova_conf() { SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"} iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL" fi - if [ "$VIRT_DRIVER" = 'xenserver' ]; then - VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} - else - VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} - fi if is_service_enabled n-novnc || is_service_enabled n-xvnc; then # Address on which instance vncservers will listen on compute hosts. # For multi-host, this should be the management ip of the compute host. 
VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} + VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} iniset $NOVA_CONF DEFAULT vnc_enabled true iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver new file mode 100644 index 0000000000..f47994f187 --- /dev/null +++ b/lib/nova_plugins/hypervisor-xenserver @@ -0,0 +1,85 @@ +# lib/nova_plugins/hypervisor-xenserver +# Configure the XenServer hypervisor + +# Enable with: +# VIRT_DRIVER=xenserver + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +PUBLIC_INTERFACE_DEFAULT=eth2 +GUEST_INTERFACE_DEFAULT=eth1 +# Allow ``build_domU.sh`` to specify the flat network bridge via kernel args +FLAT_NETWORK_BRIDGE_DEFAULT=$(sed -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline) +if is_service_enabled neutron; then + XEN_INTEGRATION_BRIDGE=$(sed -e 's/.* xen_integration_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline) +fi + +VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} + + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor() { + # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor() { + if [ -z "$XENAPI_CONNECTION_URL" ]; then + die $LINENO "XENAPI_CONNECTION_URL is not 
specified" + fi + read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." + iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver" + iniset $NOVA_CONF DEFAULT xenapi_connection_url "$XENAPI_CONNECTION_URL" + iniset $NOVA_CONF DEFAULT xenapi_connection_username "$XENAPI_USER" + iniset $NOVA_CONF DEFAULT xenapi_connection_password "$XENAPI_PASSWORD" + iniset $NOVA_CONF DEFAULT flat_injected "False" + # Need to avoid crash due to new firewall support + XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} + iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER" +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor() { + # This function intentionally left blank + : +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor() { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor() { + # This function intentionally left blank + : +} + + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index 71e7317ee3..7bb4b59cc1 100755 --- a/stack.sh +++ b/stack.sh @@ -1011,25 +1011,6 @@ if is_service_enabled nova; then configure_nova_hypervisor - # XenServer - # --------- - - elif [ "$VIRT_DRIVER" = 'xenserver' ]; then - echo_summary "Using XenServer virtualization driver" - if [ -z "$XENAPI_CONNECTION_URL" ]; then - die $LINENO "XENAPI_CONNECTION_URL is not specified" - fi - read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." 
- iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver" - iniset $NOVA_CONF DEFAULT xenapi_connection_url "$XENAPI_CONNECTION_URL" - iniset $NOVA_CONF DEFAULT xenapi_connection_username "$XENAPI_USER" - iniset $NOVA_CONF DEFAULT xenapi_connection_password "$XENAPI_PASSWORD" - iniset $NOVA_CONF DEFAULT flat_injected "False" - # Need to avoid crash due to new firewall support - XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} - iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER" - - # OpenVZ # ------ From f4bd16ac84904eb3afc0eca283b63a1a6efd2c5a Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 23 Sep 2013 14:07:31 -0500 Subject: [PATCH 0107/4438] fake hypervisor plugin Convert fake hypervisor configuration in Nova to the new plugin setup. Change-Id: I8b1404ee97a2a65f0884efae642b98bb134cb2aa --- lib/nova_plugins/hypervisor-fake | 77 ++++++++++++++++++++++++++++++++ stack.sh | 20 --------- 2 files changed, 77 insertions(+), 20 deletions(-) create mode 100644 lib/nova_plugins/hypervisor-fake diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake new file mode 100644 index 0000000000..fe0d1900ee --- /dev/null +++ b/lib/nova_plugins/hypervisor-fake @@ -0,0 +1,77 @@ +# lib/nova_plugins/hypervisor-fake +# Configure the fake hypervisor + +# Enable with: +# VIRT_DRIVER=fake + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor() { 
+ # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor() { + iniset $NOVA_CONF DEFAULT compute_driver "nova.virt.fake.FakeDriver" + # Disable arbitrary limits + iniset $NOVA_CONF DEFAULT quota_instances -1 + iniset $NOVA_CONF DEFAULT quota_cores -1 + iniset $NOVA_CONF DEFAULT quota_ram -1 + iniset $NOVA_CONF DEFAULT quota_floating_ips -1 + iniset $NOVA_CONF DEFAULT quota_fixed_ips -1 + iniset $NOVA_CONF DEFAULT quota_metadata_items -1 + iniset $NOVA_CONF DEFAULT quota_injected_files -1 + iniset $NOVA_CONF DEFAULT quota_injected_file_path_bytes -1 + iniset $NOVA_CONF DEFAULT quota_security_groups -1 + iniset $NOVA_CONF DEFAULT quota_security_group_rules -1 + iniset $NOVA_CONF DEFAULT quota_key_pairs -1 + iniset $NOVA_CONF DEFAULT scheduler_default_filters "RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter" +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor() { + # This function intentionally left blank + : +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor() { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor() { + # This function intentionally left blank + : +} + + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index 71e7317ee3..6bab2183d6 100755 --- a/stack.sh +++ b/stack.sh @@ -1104,26 +1104,6 @@ if is_service_enabled nova; then iniset $NOVA_CONF vmware integration_bridge $OVS_BRIDGE fi - # fake - # ---- - - elif [ "$VIRT_DRIVER" = 'fake' ]; then - echo_summary "Using fake Virt driver" - iniset $NOVA_CONF DEFAULT compute_driver "nova.virt.fake.FakeDriver" - # Disable arbitrary limits - iniset $NOVA_CONF DEFAULT quota_instances -1 - iniset $NOVA_CONF DEFAULT quota_cores 
-1 - iniset $NOVA_CONF DEFAULT quota_ram -1 - iniset $NOVA_CONF DEFAULT quota_floating_ips -1 - iniset $NOVA_CONF DEFAULT quota_fixed_ips -1 - iniset $NOVA_CONF DEFAULT quota_metadata_items -1 - iniset $NOVA_CONF DEFAULT quota_injected_files -1 - iniset $NOVA_CONF DEFAULT quota_injected_file_path_bytes -1 - iniset $NOVA_CONF DEFAULT quota_security_groups -1 - iniset $NOVA_CONF DEFAULT quota_security_group_rules -1 - iniset $NOVA_CONF DEFAULT quota_key_pairs -1 - iniset $NOVA_CONF DEFAULT scheduler_default_filters "RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter" - # Default libvirt # --------------- From c3431bfdd90b3d149b119038d19f6a22bc278dc0 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Fri, 6 Sep 2013 15:30:22 -0400 Subject: [PATCH 0108/4438] Clean up automated changes to requirements Some of us like to reuse sandboxes, and keep them up to date. This is very difficult to do if devstack leaves modifications to requirements.txt files after a run, since 'git pull' may refuse to overwrite those changes. This modification has devstack undo the changes to the requirements files, to leave the sandbox in a clean state again. Change-Id: Ia2d928ade8141b59b56a2c4548d760bf6911a3e5 --- functions | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/functions b/functions index e1a5f4be3a..1c45851d91 100644 --- a/functions +++ b/functions @@ -1216,7 +1216,10 @@ function setup_develop() { echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" # Don't update repo if local changes exist - if (cd $project_dir && git diff --quiet); then + (cd $project_dir && git diff --quiet) + local update_requirements=$? 
+ + if [ $update_requirements -eq 0 ]; then (cd $REQUIREMENTS_DIR; \ $SUDO_CMD python update.py $project_dir) fi @@ -1224,6 +1227,11 @@ function setup_develop() { pip_install -e $project_dir # ensure that further actions can do things like setup.py sdist safe_chown -R $STACK_USER $1/*.egg-info + + # Undo requirements changes, if we made them + if [ $update_requirements -eq 0 ]; then + (cd $project_dir && git checkout -- requirements.txt test-requirements.txt setup.py) + fi } From 5470701e10ee68c80860d4cf7e0fa5d8a913c288 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Tue, 17 Sep 2013 12:07:48 +1000 Subject: [PATCH 0109/4438] Fix swift httpd on fedora Implements a fedora equivalent of ubuntu's sites-enabled and moves enabling of mod_wsgi to the installation period so that it doesn't have to be handled in a platform dependant way later. Fixes: bug 1226363 Change-Id: I85325179f1792d985b0375572abfe8c8a82fecc3 --- lib/apache | 27 +++++++++++++++++++++++++++ lib/horizon | 5 ----- lib/swift | 16 ++++++---------- 3 files changed, 33 insertions(+), 15 deletions(-) diff --git a/lib/apache b/lib/apache index d811f87510..3a1f6f1263 100644 --- a/lib/apache +++ b/lib/apache @@ -6,6 +6,8 @@ # is_apache_enabled_service # install_apache_wsgi # config_apache_wsgi +# enable_apache_site +# disable_apache_site # start_apache_server # stop_apache_server # restart_apache_server @@ -57,16 +59,41 @@ function install_apache_wsgi() { if is_ubuntu; then # Install apache2, which is NOPRIME'd install_package apache2 libapache2-mod-wsgi + # WSGI isn't enabled by default, enable it + sudo a2enmod wsgi elif is_fedora; then sudo rm -f /etc/httpd/conf.d/000-* install_package httpd mod_wsgi elif is_suse; then install_package apache2 apache2-mod_wsgi + # WSGI isn't enabled by default, enable it + sudo a2enmod wsgi else exit_distro_not_supported "apache installation" fi } +# enable_apache_site() - Enable a particular apache site +function enable_apache_site() { + local site=$@ + if is_ubuntu; then + 
sudo a2ensite ${site} + elif is_fedora; then + # fedora conf.d is only imported if it ends with .conf so this is approx the same + sudo mv /etc/$APACHE_NAME/$APACHE_CONF_DIR/${site} /etc/$APACHE_NAME/$APACHE_CONF_DIR/${site}.conf + fi +} + +# disable_apache_site() - Disable a particular apache site +function disable_apache_site() { + local site=$@ + if is_ubuntu; then + sudo a2dissite ${site} + elif is_fedora; then + sudo mv /etc/$APACHE_NAME/$APACHE_CONF_DIR/${site}.conf /etc/$APACHE_NAME/$APACHE_CONF_DIR/${site} + fi +} + # start_apache_server() - Start running apache server function start_apache_server() { start_service $APACHE_NAME diff --git a/lib/horizon b/lib/horizon index e55bc152f6..5973eb2a9f 100644 --- a/lib/horizon +++ b/lib/horizon @@ -123,8 +123,6 @@ function init_horizon() { # Be a good citizen and use the distro tools here sudo touch $horizon_conf sudo a2ensite horizon.conf - # WSGI isn't enabled by default, enable it - sudo a2enmod wsgi elif is_fedora; then if [[ "$os_RELEASE" -ge "18" ]]; then # fedora 18 has Require all denied in its httpd.conf @@ -132,9 +130,6 @@ function init_horizon() { HORIZON_REQUIRE='Require all granted' fi sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf - elif is_suse; then - # WSGI isn't enabled by default, enable it - sudo a2enmod wsgi else exit_distro_not_supported "apache configuration" fi diff --git a/lib/swift b/lib/swift index 8741e551ad..9c80802ba9 100644 --- a/lib/swift +++ b/lib/swift @@ -115,11 +115,11 @@ function cleanup_swift() { # _cleanup_swift_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file function _cleanup_swift_apache_wsgi() { sudo rm -f $SWIFT_APACHE_WSGI_DIR/*.wsgi - ! is_fedora && sudo a2dissite proxy-server + disable_apache_site proxy-server for node_number in ${SWIFT_REPLICAS_SEQ}; do for type in object container account; do site_name=${type}-server-${node_number} - ! 
is_fedora && sudo a2dissite ${site_name} + disable_apache_site ${site_name} sudo rm -f /etc/$APACHE_NAME/$APACHE_CONF_DIR/${site_name} done done @@ -140,13 +140,13 @@ function _config_swift_apache_wsgi() { s/%APACHE_NAME%/${APACHE_NAME}/g; s/%USER%/${STACK_USER}/g; " -i ${apache_vhost_dir}/proxy-server + enable_apache_site proxy-server sudo cp ${SWIFT_DIR}/examples/wsgi/proxy-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi sudo sed -e " /^#/d;/^$/d; s/%SERVICECONF%/proxy-server.conf/g; " -i ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi - ! is_fedora && sudo a2ensite proxy-server # copy apache vhost file and set name and port for node_number in ${SWIFT_REPLICAS_SEQ}; do @@ -161,7 +161,7 @@ function _config_swift_apache_wsgi() { s/%APACHE_NAME%/${APACHE_NAME}/g; s/%USER%/${STACK_USER}/g; " -i ${apache_vhost_dir}/object-server-${node_number} - ! is_fedora && sudo a2ensite object-server-${node_number} + enable_apache_site object-server-${node_number} sudo cp ${SWIFT_DIR}/examples/wsgi/object-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/object-server-${node_number}.wsgi sudo sed -e " @@ -177,7 +177,7 @@ function _config_swift_apache_wsgi() { s/%APACHE_NAME%/${APACHE_NAME}/g; s/%USER%/${STACK_USER}/g; " -i ${apache_vhost_dir}/container-server-${node_number} - ! is_fedora && sudo a2ensite container-server-${node_number} + enable_apache_site container-server-${node_number} sudo cp ${SWIFT_DIR}/examples/wsgi/container-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/container-server-${node_number}.wsgi sudo sed -e " @@ -193,18 +193,14 @@ function _config_swift_apache_wsgi() { s/%APACHE_NAME%/${APACHE_NAME}/g; s/%USER%/${STACK_USER}/g; " -i ${apache_vhost_dir}/account-server-${node_number} - ! 
is_fedora && sudo a2ensite account-server-${node_number} + enable_apache_site account-server-${node_number} sudo cp ${SWIFT_DIR}/examples/wsgi/account-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi sudo sed -e " /^#/d;/^$/d; s/%SERVICECONF%/account-server\/${node_number}.conf/g; " -i ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi - done - - # WSGI isn't enabled by default, enable it - ! is_fedora && sudo a2enmod wsgi } # configure_swift() - Set config files, create data dirs and loop image From a00e5f8810b6ca3b0b5d63cc228125e19bc91955 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Tue, 17 Sep 2013 12:47:03 +1000 Subject: [PATCH 0110/4438] Allow keystone to run from apache Provide a template for running keystone as a mod_wsgi process and enable it from configuration. Based on: https://review.openstack.org/#/c/36474/ Also-by: zhang-hare Implements: blueprint devstack-setup-apache-keystone Change-Id: Icc9d7ddfa4a488c08816ff4ae0b53c0134a1016b --- files/apache-keystone.template | 22 ++++++++++++++++ lib/keystone | 47 ++++++++++++++++++++++++++++++++-- 2 files changed, 67 insertions(+), 2 deletions(-) create mode 100644 files/apache-keystone.template diff --git a/files/apache-keystone.template b/files/apache-keystone.template new file mode 100644 index 0000000000..919452a040 --- /dev/null +++ b/files/apache-keystone.template @@ -0,0 +1,22 @@ +Listen %PUBLICPORT% +Listen %ADMINPORT% + + + WSGIDaemonProcess keystone-public processes=5 threads=1 user=%USER% + WSGIProcessGroup keystone-public + WSGIScriptAlias / %PUBLICWSGI% + WSGIApplicationGroup %{GLOBAL} + ErrorLog /var/log/%APACHE_NAME%/keystone + LogLevel debug + CustomLog /var/log/%APACHE_NAME%/access.log combined + + + + WSGIDaemonProcess keystone-admin processes=5 threads=1 user=%USER% + WSGIProcessGroup keystone-admin + WSGIScriptAlias / %ADMINWSGI% + WSGIApplicationGroup %{GLOBAL} + ErrorLog /var/log/%APACHE_NAME%/keystone + LogLevel debug + CustomLog 
/var/log/%APACHE_NAME%/access.log combined + diff --git a/lib/keystone b/lib/keystone index 3642904e1c..c4b2dff93b 100755 --- a/lib/keystone +++ b/lib/keystone @@ -14,11 +14,13 @@ # # install_keystone # configure_keystone +# _config_keystone_apache_wsgi # init_keystone # start_keystone # create_keystone_accounts # stop_keystone # cleanup_keystone +# _cleanup_keystone_apache_wsgi # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -34,6 +36,7 @@ KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini} KEYSTONE_AUTH_CACHE_DIR=${KEYSTONE_AUTH_CACHE_DIR:-/var/cache/keystone} +KEYSTONE_WSGI_DIR=${KEYSTONE_WSGI_DIR:-/var/www/keystone} KEYSTONECLIENT_DIR=$DEST/python-keystoneclient @@ -86,6 +89,33 @@ function cleanup_keystone() { : } +# _cleanup_keystone_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file +function _cleanup_keystone_apache_wsgi() { + sudo rm -f $KEYSTONE_WSGI_DIR/*.wsgi + disable_apache_site keystone + sudo rm -f /etc/$APACHE_NAME/$APACHE_CONF_DIR/keystone +} + +# _config_keystone_apache_wsgi() - Set WSGI config files of Keystone +function _config_keystone_apache_wsgi() { + sudo mkdir -p $KEYSTONE_WSGI_DIR + + # copy proxy vhost and wsgi file + sudo cp $KEYSTONE_DIR/httpd/keystone.py $KEYSTONE_WSGI_DIR/main + sudo cp $KEYSTONE_DIR/httpd/keystone.py $KEYSTONE_WSGI_DIR/admin + + sudo cp $FILES/apache-keystone.template /etc/$APACHE_NAME/$APACHE_CONF_DIR/keystone + sudo sed -e " + s|%PUBLICPORT%|$KEYSTONE_SERVICE_PORT|g; + s|%ADMINPORT%|$KEYSTONE_AUTH_PORT|g; + s|%APACHE_NAME%|$APACHE_NAME|g; + s|%PUBLICWSGI%|$KEYSTONE_WSGI_DIR/main|g; + s|%ADMINWSGI%|$KEYSTONE_WSGI_DIR/admin|g; + s|%USER%|$STACK_USER|g + " -i /etc/$APACHE_NAME/$APACHE_CONF_DIR/keystone + enable_apache_site keystone +} + # configure_keystone() - Set config files, create data dirs, etc function configure_keystone() { if [[ ! 
-d $KEYSTONE_CONF_DIR ]]; then @@ -204,6 +234,10 @@ function configure_keystone() { cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG" iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production" + + if is_apache_enabled_service key; then + _config_keystone_apache_wsgi + fi } # create_keystone_accounts() - Sets up common required keystone accounts @@ -316,6 +350,9 @@ function install_keystone() { fi git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH setup_develop $KEYSTONE_DIR + if is_apache_enabled_service key; then + install_apache_wsgi + fi } # start_keystone() - Start running processes, including screen @@ -326,8 +363,14 @@ function start_keystone() { service_port=$KEYSTONE_SERVICE_PORT_INT fi - # Start Keystone in a screen window - screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" + if is_apache_enabled_service key; then + restart_apache_server + screen_it key "cd $KEYSTONE_DIR && sudo tail -f /var/log/$APACHE_NAME/keystone" + else + # Start Keystone in a screen window + screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" + fi + echo "Waiting for keystone to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s http://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then die $LINENO "keystone did not start" From 06d17eb54ad37e6d21eafcded52cc581a56d328b Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 24 Sep 2013 07:04:11 -0400 Subject: [PATCH 0111/4438] make git show not display a diff if the top commit includes actual content, this was being displayed in the devstack log, which was just confusing. --quiet suppresses this. 
Change-Id: Id52604d3b2b9a1372746120cb5d8d741c35576b7 --- functions | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/functions b/functions index 209f13c7d7..d590ac52f6 100644 --- a/functions +++ b/functions @@ -556,7 +556,7 @@ function git_clone { echo "Running in offline mode, clones already exist" # print out the results so we know what change was used in the logs cd $GIT_DEST - git show --oneline + git show --oneline --quiet return fi @@ -604,7 +604,7 @@ function git_clone { # print out the results so we know what change was used in the logs cd $GIT_DEST - git show --oneline + git show --oneline --quiet } From fb434b28d057d279a8351776f7909102def571dd Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Tue, 24 Sep 2013 15:58:37 +0100 Subject: [PATCH 0112/4438] xenapi: Get rid of prompt settings Remove the setting of prompt from prepare guest, keeping the scripts smaller. Change-Id: Ifb2b3aba07831e1552d6a1c6cd2081592c43ccf6 --- tools/xen/prepare_guest.sh | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index 6ec5ffa546..05ac86cf99 100755 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -56,11 +56,6 @@ EOF # Give ownership of /opt/stack to stack user chown -R $STACK_USER /opt/stack -# Make our ip address hostnames look nice at the command prompt -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> /opt/stack/.bashrc -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> /root/.bashrc -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> /etc/profile - function setup_vimrc { if [ ! -e $1 ]; then # Simple but usable vimrc From f5002ef12a890fd3110782c873d99487a4d05b17 Mon Sep 17 00:00:00 2001 From: Roman Prykhodchenko Date: Tue, 24 Sep 2013 19:09:26 +0300 Subject: [PATCH 0113/4438] Expose all versions of Ironic API In the observable future new versions of the Ironic API will appear. 
That's why it's reasonable to expose the endpoint that will provide access to all versions of the API. Closes-Bug: #1229780 Change-Id: I4ec2b45688da3fa6c0d43e8be60885774cfbffd6 --- lib/ironic | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ironic b/lib/ironic index 2ce5038ea4..072d2ded82 100644 --- a/lib/ironic +++ b/lib/ironic @@ -148,9 +148,9 @@ create_ironic_accounts() { keystone endpoint-create \ --region RegionOne \ --service_id $IRONIC_SERVICE \ - --publicurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT/v1/" \ - --adminurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT/v1/" \ - --internalurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT/v1/" + --publicurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \ + --adminurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \ + --internalurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" fi fi } From 6d23500aa66e3d399cd263c2fb1d07dba0e0170c Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Wed, 18 Sep 2013 20:27:08 +0000 Subject: [PATCH 0114/4438] Default to the ML2 plugin in Neutron instead of OVS In Icehouse, the OVS and LinuxBridge plugins are being deprecated in favor of the Modular Layer 2 (ML2) plugin. This change modifies devstack to default to ML2 when Neutron is used for networking. 
Fixes bug 1220745 Change-Id: I9a4c84b04727a710219fc11f862a655309ffb99b --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 4a3d1b06a6..b1f96fce79 100644 --- a/lib/neutron +++ b/lib/neutron @@ -88,7 +88,7 @@ NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} # Default Neutron Plugin -Q_PLUGIN=${Q_PLUGIN:-openvswitch} +Q_PLUGIN=${Q_PLUGIN:-ml2} # Default Neutron Port Q_PORT=${Q_PORT:-9696} # Default Neutron Host From 2dc11fb5c843ccf057fac9e01cf6beca7d877421 Mon Sep 17 00:00:00 2001 From: Morgan Fainberg Date: Tue, 24 Sep 2013 23:43:08 -0700 Subject: [PATCH 0115/4438] Update user_attribute_ignore for LDAP Identity config With a recent patch to keystone, the use of tenantId, tenant_id, and default_project_id was normalized to reference default_project_id for all cases internally and translate to the expected results at the controller (v2.0 returns tenantId, v3 returns default_project_id). Devstack must now properly ignore the expected mapped LDAP attribute of 'default_project_id' instead of the old 'tenantId'. Without this fix devstack will fail when using the LDAP identity backend because the 'default_project_id' has been made a special case that requires the operator of a Openstack cloud to explicitly choose an attribute to map 'default_project_id' to if storing that value is desired. Without explicitly mapping that attribute and not having it in the 'user_attribute_ignore' config option, the user_creates can fail. 
related-bug: 1219739 Change-Id: I1dd3719de50f6d0948b3a9743e32a03d0ac56b3c --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 3642904e1c..bc5d1511b8 100755 --- a/lib/keystone +++ b/lib/keystone @@ -115,7 +115,7 @@ function configure_keystone() { iniset $KEYSTONE_CONF ldap user "dc=Manager,dc=openstack,dc=org" iniset $KEYSTONE_CONF ldap suffix "dc=openstack,dc=org" iniset $KEYSTONE_CONF ldap use_dumb_member "True" - iniset $KEYSTONE_CONF ldap user_attribute_ignore "enabled,email,tenants,tenantId" + iniset $KEYSTONE_CONF ldap user_attribute_ignore "enabled,email,tenants,default_project_id" iniset $KEYSTONE_CONF ldap tenant_attribute_ignore "enabled" iniset $KEYSTONE_CONF ldap tenant_domain_id_attribute "businessCategory" iniset $KEYSTONE_CONF ldap tenant_desc_attribute "description" From a8d41e3af70309fb9c8df150ef162685bae41ee4 Mon Sep 17 00:00:00 2001 From: Sirushti Murugesan Date: Wed, 25 Sep 2013 11:30:31 +0530 Subject: [PATCH 0116/4438] Normalise RECLONE flag to True Or False. RECLONE flag now uses function trueorfalse for flag handling. Added more flag cases to normalisation function trueorfalse. 
Fixes bug #1200382 Change-Id: I0738537c87634281c6a92fa93b7f84a6b0dad497 --- functions | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/functions b/functions index 4c4487f9cb..6cdee78db6 100644 --- a/functions +++ b/functions @@ -551,6 +551,7 @@ function git_clone { GIT_REMOTE=$1 GIT_DEST=$2 GIT_REF=$3 + RECLONE=$(trueorfalse False $RECLONE) if [[ "$OFFLINE" = "True" ]]; then echo "Running in offline mode, clones already exist" @@ -576,7 +577,7 @@ function git_clone { cd $GIT_DEST # This checkout syntax works for both branches and tags git checkout $GIT_REF - elif [[ "$RECLONE" == "yes" ]]; then + elif [[ "$RECLONE" = "True" ]]; then # if it does exist then simulate what clone does if asked to RECLONE cd $GIT_DEST # set the url to pull from and fetch @@ -1260,16 +1261,16 @@ function stop_service() { # Normalize config values to True or False -# Accepts as False: 0 no false False FALSE -# Accepts as True: 1 yes true True TRUE +# Accepts as False: 0 no No NO false False FALSE +# Accepts as True: 1 yes Yes YES true True TRUE # VAR=$(trueorfalse default-value test-value) function trueorfalse() { local default=$1 local testval=$2 [[ -z "$testval" ]] && { echo "$default"; return; } - [[ "0 no false False FALSE" =~ "$testval" ]] && { echo "False"; return; } - [[ "1 yes true True TRUE" =~ "$testval" ]] && { echo "True"; return; } + [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; } + [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; } echo "$default" } From 45ea08115074a78b2bb31cf9f880eddf1e7051aa Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 25 Sep 2013 10:00:29 +0100 Subject: [PATCH 0117/4438] Fix typo in lib/heat With the change https://review.openstack.org/43006 a typo was introduced. This change fixes it. 
Change-Id: Iebcbfe49d779552c17f6ab216976149f332b772c --- lib/heat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index ac769162db..ff9473ecdb 100644 --- a/lib/heat +++ b/lib/heat @@ -1,4 +1,4 @@ -etup lib/heat +# lib/heat # Install and start **Heat** service # To enable, add the following to localrc From 93f3b8693af1250b4b6925e83c33662c4dcd9636 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Tue, 24 Sep 2013 17:35:00 +0100 Subject: [PATCH 0118/4438] xenapi: enable user interaction with stack.sh In XenServer, devstack runs inside a virtual machine. This makes it hard for the user to interact with stack.sh should a parameter be missing. This change will create an upstart job (devstack) that runs stack.sh with console enabled so user can interact with it by running vncviewer or using XenCenter. Logging the output is also disabled, stamp files are used instead to detect the script run status in case install_os_domU.sh is used. As run.sh.log is removed, standard devstack logging should be used. The change also removes the environment settings from run.sh, as they are not needed, they should be specified in localrc. 
This way user cannot get different experiences by using unstack.sh/stack.sh or run.sh Also a proper unstack.sh is called instead of killing screen in run.sh Change-Id: I7eb12bd74746cc7a1db3aa9fd68ece645a50001d --- tools/xen/build_xva.sh | 40 ++++++++++++++++++++++++++++-------- tools/xen/install_os_domU.sh | 24 +++++++++------------- 2 files changed, 41 insertions(+), 23 deletions(-) diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index d0cdf17391..7272fe2664 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -93,13 +93,34 @@ mkdir -p $STAGING_DIR/opt/stack/devstack tar xf /tmp/devstack.tar -C $STAGING_DIR/opt/stack/devstack cd $TOP_DIR -# Run devstack on launch -cat <$STAGING_DIR/etc/rc.local -# network restart required for getting the right gateway -/etc/init.d/networking restart -chown -R $STACK_USER /opt/stack -su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" $STACK_USER -exit 0 +# Create an upstart job (task) for devstack, which can interact with the console +cat >$STAGING_DIR/etc/init/devstack.conf << EOF +start on stopped rc RUNLEVEL=[2345] + +console output +task + +pre-start script + rm -f /var/run/devstack.succeeded +end script + +script + initctl stop hvc0 || true + + # Read any leftover characters from standard input + while read -n 1 -s -t 0.1 -r ignored; do + true + done + + clear + + chown -R $STACK_USER /opt/stack + + if su -c "/opt/stack/run.sh" $STACK_USER; then + touch /var/run/devstack.succeeded + fi + initctl start hvc0 > /dev/null 2>&1 +end script EOF # Configure the hostname @@ -138,8 +159,9 @@ fi # Configure run.sh cat <$STAGING_DIR/opt/stack/run.sh #!/bin/bash +set -eux cd /opt/stack/devstack -killall screen -VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=$MULTI_HOST HOST_IP_IFACE=$HOST_IP_IFACE $STACKSH_PARAMS ./stack.sh +./unstack.sh || true +./stack.sh EOF chmod 755 $STAGING_DIR/opt/stack/run.sh diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 110bbd998c..a0cfe27caf 100755 --- 
a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -350,25 +350,20 @@ COPYENV=${COPYENV:-1} if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" = "1" ]; then set +x - echo "VM Launched - Waiting for startup script" - # wait for log to appear - while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "[ -e run.sh.log ]"; do + echo "VM Launched - Waiting for devstack to start" + while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "service devstack status | grep -q running"; do sleep 10 done - echo -n "Running" - while [ `ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS pgrep -c run.sh` -ge 1 ] - do + echo -n "devstack is running" + while ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "service devstack status | grep -q running"; do sleep 10 echo -n "." done echo "done!" set -x - # output the run.sh.log - ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'cat run.sh.log' - - # Fail if the expected text is not found - ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'cat run.sh.log' | grep -q 'stack.sh completed in' + # Fail if devstack did not succeed + ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'test -e /var/run/devstack.succeeded' set +x echo "################################################################################" @@ -382,11 +377,12 @@ else echo "" echo "All Finished!" echo "Now, you can monitor the progress of the stack.sh installation by " - echo "tailing /opt/stack/run.sh.log from within your domU." + echo "looking at the console of your domU / checking the log files." echo "" echo "ssh into your domU now: 'ssh stack@$OS_VM_MANAGEMENT_ADDRESS' using your password" - echo "and then do: 'tail -f /opt/stack/run.sh.log'" + echo "and then do: 'sudo service devstack status' to check if devstack is still running." 
+ echo "Check that /var/run/devstack.succeeded exists" echo "" - echo "When the script completes, you can then visit the OpenStack Dashboard" + echo "When devstack completes, you can visit the OpenStack Dashboard" echo "at http://$OS_VM_SERVICES_ADDRESS, and contact other services at the usual ports." fi From 45a21f0e54def308f1d05440f030b49346b73fad Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 25 Sep 2013 10:27:27 -0400 Subject: [PATCH 0119/4438] change git show to | head -1 git show uses default system pager, which for people that have funky pagers, goes sideways. Pipe this through head -1 to ensure we only get the single change line we care about. Change-Id: Iff22612b555bf58fe12101701cfe593f37e8f8de --- functions | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/functions b/functions index f30c7adf15..8c0bc2893c 100644 --- a/functions +++ b/functions @@ -556,7 +556,7 @@ function git_clone { echo "Running in offline mode, clones already exist" # print out the results so we know what change was used in the logs cd $GIT_DEST - git show --oneline --quiet + git show --oneline | head -1 return fi @@ -604,7 +604,7 @@ function git_clone { # print out the results so we know what change was used in the logs cd $GIT_DEST - git show --oneline --quiet + git show --oneline | head -1 } From de60f48ad9d721bafb376a4b18516f3aad60527a Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 25 Sep 2013 15:38:24 +0100 Subject: [PATCH 0120/4438] fix tee errors Do not specify /dev/fd3 directly, use >&3 instead. This change enables to use stack.sh as an upstart script, and with VERBOSE=False, it will print the expected messages. 
Fixes bug 1230342 Change-Id: I6e3a81fd435e8c46d553bfdee08f8bf42d0f4387 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 71e7317ee3..449eb06751 100755 --- a/stack.sh +++ b/stack.sh @@ -518,7 +518,7 @@ if [[ -n "$LOGFILE" ]]; then # Set fd 1 and 2 to primary logfile exec 1> "${LOGFILE}" 2>&1 # Set fd 6 to summary logfile and stdout - exec 6> >( tee "${SUMFILE}" /dev/fd/3 ) + exec 6> >( tee "${SUMFILE}" >&3 ) fi echo_summary "stack.sh log $LOGFILE" From 7b7bc9209a533c371a13946eac35f3fa6243f74a Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 23 Sep 2013 13:56:19 -0500 Subject: [PATCH 0121/4438] vSphere hypervisor plugin Convert vSphere hypervisor configuration in Nova to the new plugin setup. Change-Id: Ibf6f5918e6a8d8a7b7784dac832d806e993cff8f --- lib/nova_plugins/hypervisor-vsphere | 72 +++++++++++++++++++++++++++++ stack.sh | 16 ------- 2 files changed, 72 insertions(+), 16 deletions(-) create mode 100644 lib/nova_plugins/hypervisor-vsphere diff --git a/lib/nova_plugins/hypervisor-vsphere b/lib/nova_plugins/hypervisor-vsphere new file mode 100644 index 0000000000..1666246374 --- /dev/null +++ b/lib/nova_plugins/hypervisor-vsphere @@ -0,0 +1,72 @@ +# lib/nova_plugins/hypervisor-vsphere +# Configure the vSphere hypervisor + +# Enable with: +# VIRT_DRIVER=vsphere + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor() { + # This function intentionally 
left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor() { + iniset $NOVA_CONF DEFAULT compute_driver "vmwareapi.VMwareVCDriver" + VMWAREAPI_USER=${VMWAREAPI_USER:-"root"} + iniset $NOVA_CONF vmware host_ip "$VMWAREAPI_IP" + iniset $NOVA_CONF vmware host_username "$VMWAREAPI_USER" + iniset $NOVA_CONF vmware host_password "$VMWAREAPI_PASSWORD" + iniset $NOVA_CONF vmware cluster_name "$VMWAREAPI_CLUSTER" + if is_service_enabled neutron; then + iniset $NOVA_CONF vmware integration_bridge $OVS_BRIDGE + fi +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor() { + # This function intentionally left blank + : +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor() { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor() { + # This function intentionally left blank + : +} + + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index 6bab2183d6..f42437104d 100755 --- a/stack.sh +++ b/stack.sh @@ -1089,22 +1089,6 @@ if is_service_enabled nova; then iniset $NOVA_CONF DEFAULT powervm_img_local_path $POWERVM_IMG_LOCAL_PATH - # vSphere API - # ----------- - - elif [ "$VIRT_DRIVER" = 'vsphere' ]; then - echo_summary "Using VMware vCenter driver" - iniset $NOVA_CONF DEFAULT compute_driver "vmwareapi.VMwareVCDriver" - VMWAREAPI_USER=${VMWAREAPI_USER:-"root"} - iniset $NOVA_CONF vmware host_ip "$VMWAREAPI_IP" - iniset $NOVA_CONF vmware host_username "$VMWAREAPI_USER" - iniset $NOVA_CONF vmware host_password "$VMWAREAPI_PASSWORD" - iniset $NOVA_CONF vmware cluster_name "$VMWAREAPI_CLUSTER" - if is_service_enabled neutron; then - iniset $NOVA_CONF vmware integration_bridge $OVS_BRIDGE - fi - - # Default libvirt # --------------- From da481d0d0a641c72fbc98c57711370f3f7309113 Mon 
Sep 17 00:00:00 2001 From: Mate Lakat Date: Thu, 26 Sep 2013 13:57:02 +0100 Subject: [PATCH 0122/4438] xenapi: display IP and DevStack result on console The devstack setup service can update /etc/issue, displaying the status of the installation and the VM's management IP. With this change, after the devstack service finsihed, the login prompt will look like this: OpenStack VM - Installed by DevStack Management IP: 10.219.3.108 Devstack run: SUCCEEDED DevStackOSDomU login: This helps people to log in to their system. Change-Id: Idd6bbd5faf9ced5618cd3e95191bfc3b89473fa2 --- tools/xen/build_xva.sh | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index 7272fe2664..958102b29c 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -119,6 +119,20 @@ script if su -c "/opt/stack/run.sh" $STACK_USER; then touch /var/run/devstack.succeeded fi + + # Update /etc/issue + { + echo "OpenStack VM - Installed by DevStack" + IPADDR=\$(ip -4 address show eth0 | sed -n 's/.*inet \\([0-9\.]\\+\\).*/\1/p') + echo " Management IP: \$IPADDR" + echo -n " Devstack run: " + if [ -e /var/run/devstack.succeeded ]; then + echo "SUCCEEDED" + else + echo "FAILED" + fi + echo "" + } > /etc/issue initctl start hvc0 > /dev/null 2>&1 end script EOF From 3d84cf2d7c323750971cf2d27f3a4eaa26cb7a9f Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Thu, 12 Sep 2013 13:25:54 -0400 Subject: [PATCH 0123/4438] Enable tenant isolation to tempest for neutron This commit re-enables tenant isolation in tempest for neutron. This is a requirement for running tempest in parallel. 
This commit depends on tempest change I7587c85017cca09f7a67eae0670f67b2bceacb60 Fixes bug 1216076 Change-Id: I63a30bacd48cecd110fb90e1fc718249c2b1904b --- lib/tempest | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/tempest b/lib/tempest index bc0b18d9f4..646d42b8bd 100644 --- a/lib/tempest +++ b/lib/tempest @@ -230,11 +230,6 @@ function configure_tempest() { # Compute iniset $TEMPEST_CONF compute change_password_available False - # Note(nati) current tempest don't create network for each tenant - # so reuse same tenant for now - if is_service_enabled neutron; then - TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-False} - fi iniset $TEMPEST_CONF compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME From 93a7a50c1d4ff1a5cb5e6bd2162102c27fcdbe2d Mon Sep 17 00:00:00 2001 From: Vincent Hou Date: Fri, 27 Sep 2013 06:16:54 -0400 Subject: [PATCH 0124/4438] Add the creation of /var/run/openstack when zeromq is chosen Fixed Bug 1200539. Change-Id: I270623da7026e94d9ece4d5f510cad5a6c4d79ff --- lib/rpc_backend | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/rpc_backend b/lib/rpc_backend index ff87aae2af..63edc07460 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -131,6 +131,9 @@ function install_rpc_backend() { else exit_distro_not_supported "zeromq installation" fi + # Necessary directory for socket location. 
+ sudo mkdir -p /var/run/openstack + sudo chown $STACK_USER /var/run/openstack fi } From 384454de57299981f8020e75bab781f73bacae86 Mon Sep 17 00:00:00 2001 From: Giulio Fidente Date: Fri, 27 Sep 2013 13:17:34 +0200 Subject: [PATCH 0125/4438] ensure tgtd is running in debug mode this change enables on-the-fly tgtd debug before starting cinder Change-Id: I193bfd77c5a82e8347d75e2a7fe670a6e25f5558 --- lib/cinder | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/cinder b/lib/cinder index bec65ed234..ccf38b4dea 100644 --- a/lib/cinder +++ b/lib/cinder @@ -496,6 +496,8 @@ function start_cinder() { # name, and would need to be adjusted too exit_distro_not_supported "restarting tgt" fi + # NOTE(gfidente): ensure tgtd is running in debug mode + sudo tgtadm --mode system --op update --name debug --value on fi screen_it c-api "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" From 53d6fa604df71ea7294ee9043e420d155c6fd846 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Fri, 27 Sep 2013 12:30:28 +0100 Subject: [PATCH 0126/4438] xenapi: set dhcp timeout on VM installation Set the DHCP timeout to 120 seconds during virtual machine installation. Some users failed to run devstack, due to a low DHCP timeout setting. The default value is 60 seconds. This change sets the value to 120 secs, that should give enough time for most people. Change-Id: I15fde45ed0d005c1a8621134eee6c3c338b5be5d --- tools/xen/devstackubuntupreseed.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/devstackubuntupreseed.cfg b/tools/xen/devstackubuntupreseed.cfg index c559b1e9f5..6a1ae89fd9 100644 --- a/tools/xen/devstackubuntupreseed.cfg +++ b/tools/xen/devstackubuntupreseed.cfg @@ -34,7 +34,7 @@ d-i netcfg/choose_interface select auto # If you have a slow dhcp server and the installer times out waiting for # it, this might be useful. 
-#d-i netcfg/dhcp_timeout string 60 +d-i netcfg/dhcp_timeout string 120 # If you prefer to configure the network manually, uncomment this line and # the static network configuration below. From d9883407e910da0fc8307f12f76c0c8e594321fe Mon Sep 17 00:00:00 2001 From: Cyril Roelandt Date: Fri, 27 Sep 2013 15:16:51 +0000 Subject: [PATCH 0127/4438] Revert "Revert "Swift: configure Ceilometer when it is enabled"" Commit 6650fda680310e71b5dda7764bf4033f670d90f0 is no longer needed: https://review.openstack.org/#/c/46048 has been merged. This reverts commit 6650fda680310e71b5dda7764bf4033f670d90f0. Change-Id: I47d28a292667eb8ece2061c0ef19c7c925e5747c --- lib/swift | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/swift b/lib/swift index 9c80802ba9..c0dec97c36 100644 --- a/lib/swift +++ b/lib/swift @@ -67,6 +67,10 @@ SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_D # Default is ``staticweb, tempurl, formpost`` SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb} +# Set ``SWIFT_EXTRAS_MIDDLEWARE_LAST`` to extras middlewares that need to be at +# the end of the pipeline. +SWIFT_EXTRAS_MIDDLEWARE_LAST=${SWIFT_EXTRAS_MIDDLEWARE_LAST} + # The ring uses a configurable number of bits from a path’s MD5 hash as # a partition index that designates a device. The number of bits kept # from the hash is known as the partition power, and 2 to the partition @@ -255,6 +259,12 @@ function configure_swift() { iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080} + # Configure Ceilometer + if is_service_enabled ceilometer; then + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift" + SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer" + fi + # By default Swift will be installed with keystone and tempauth middleware # and add the swift3 middleware if its configured for it. 
The token for # tempauth would be prefixed with the reseller_prefix setting TEMPAUTH_ the @@ -264,6 +274,7 @@ function configure_swift() { fi swift_pipeline+=" authtoken keystoneauth tempauth " sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER} + sed -i "/^pipeline/ { s/proxy-server/${SWIFT_EXTRAS_MIDDLEWARE_LAST} proxy-server/ ; }" ${SWIFT_CONFIG_PROXY_SERVER} iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true From dc0bd1a88613b1659b780cc412527ee88f84c2e8 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Fri, 27 Sep 2013 07:45:56 +0200 Subject: [PATCH 0128/4438] Use the rdo havana repo with the RHEL family In devstack viewpoint there is not too much differences at the moment. But using the grizzly named repo close to havana release, was strange to me. Switching to the repo link which does not have a version like '-3'. Change-Id: Ib421d50d19baeeeff264aa0cb9c105fffcf572f8 --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 46c3f443c5..4bd186fdc5 100755 --- a/stack.sh +++ b/stack.sh @@ -150,8 +150,8 @@ fi if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then # Installing Open vSwitch on RHEL6 requires enabling the RDO repo. - RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack/openstack-grizzly/rdo-release-grizzly-3.noarch.rpm"} - RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-grizzly"} + RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack-havana/rdo-release-havana.rpm"} + RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-havana"} if ! 
yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then echo "RDO repo not detected; installing" yum_install $RHEL6_RDO_REPO_RPM || \ From 0aa8534ada621becb3a6bd14e4e6b5faabde9dd6 Mon Sep 17 00:00:00 2001 From: JUN JIE NAN Date: Fri, 13 Sep 2013 15:47:09 +0800 Subject: [PATCH 0129/4438] Using no proxy option to skip wget and curl proxy settings in config When end users specify proxy settings in config file for wget /etc/wgetrc: http_proxy = http://... or for curl ${HOME}/.curlrc: proxy = http://... Using `http_proxy="" wget' can not skip the proxy setting in the config files, also it can skip proxy settings in env viriables. In order to skip proxy setting in both env and config file, we pass --no-proxy option for wget, and --noproxy '*' for curl. Fixes bug #1224836 Change-Id: I2b25aeca9edf2ce4525fb1db325e5e24c18b4d55 --- functions | 2 +- lib/glance | 2 +- lib/ironic | 2 +- lib/keystone | 2 +- lib/neutron | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/functions b/functions index 83826f9327..fb2f3a3df1 100644 --- a/functions +++ b/functions @@ -1484,7 +1484,7 @@ function use_exclusive_service { function wait_for_service() { local timeout=$1 local url=$2 - timeout $timeout sh -c "while ! http_proxy= https_proxy= curl -s $url >/dev/null; do sleep 1; done" + timeout $timeout sh -c "while ! curl --noproxy '*' -s $url >/dev/null; do sleep 1; done" } diff --git a/lib/glance b/lib/glance index 7e6968200f..c6f11d06da 100644 --- a/lib/glance +++ b/lib/glance @@ -193,7 +193,7 @@ function start_glance() { screen_it g-reg "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then + if ! 
timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then die $LINENO "g-api did not start" fi } diff --git a/lib/ironic b/lib/ironic index 072d2ded82..f3b4a72f66 100644 --- a/lib/ironic +++ b/lib/ironic @@ -194,7 +194,7 @@ function start_ironic() { function start_ironic_api() { screen_it ir-api "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE" echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then die $LINENO "ir-api did not start" fi } diff --git a/lib/keystone b/lib/keystone index 699b94abb5..c93a4367d2 100755 --- a/lib/keystone +++ b/lib/keystone @@ -372,7 +372,7 @@ function start_keystone() { fi echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s http://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then die $LINENO "keystone did not start" fi diff --git a/lib/neutron b/lib/neutron index 4a3d1b06a6..efbb45c16e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -419,7 +419,7 @@ function start_neutron_service_and_check() { # Start the Neutron service screen_it q-svc "cd $NEUTRON_DIR && python $NEUTRON_BIN_DIR/neutron-server $CFG_FILE_OPTIONS" echo "Waiting for Neutron to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
wget --no-proxy -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then die $LINENO "Neutron did not start" fi } From aee9412b4bad788125e513c9d455283f14ed84de Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 30 Sep 2013 11:48:00 +0000 Subject: [PATCH 0130/4438] Allow openrc to be loaded in zsh This fix the test then detect OSX in GetOSVersion that break support of zsh. Fixes bug #1233118 Change-Id: If243fbe59f8f08041327057425018d7ae0d13ab2 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 83826f9327..d9445fe6e7 100644 --- a/functions +++ b/functions @@ -364,7 +364,7 @@ function get_packages() { # GetOSVersion GetOSVersion() { # Figure out which vendor we are - if [[ -n "`which sw_vers 2>/dev/null`" ]]; then + if [[ -x "`which sw_vers 2>/dev/null`" ]]; then # OS/X os_VENDOR=`sw_vers -productName` os_RELEASE=`sw_vers -productVersion` From fa181c30fc7140b1549e955a6a26d11fe015d6ce Mon Sep 17 00:00:00 2001 From: Thomas Maddox Date: Wed, 25 Sep 2013 20:10:22 +0000 Subject: [PATCH 0131/4438] Add back rpc_notifier when ceilometer is enabled This is to reverse what looks like collateral damage from change id Ic375272b751159a64777ca73c1b64515195aacfb. When the Ceilometer service is enabled, we also need to tell nova what to use to send notifications. 
Change-Id: I0015194cfa819e89ef85eae5020fedd6e7d71894 Closes-Bug: #1231158 --- lib/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova b/lib/nova index e5c78d8fe1..99cd843ea1 100644 --- a/lib/nova +++ b/lib/nova @@ -510,6 +510,7 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT instance_usage_audit "True" iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour" iniset $NOVA_CONF DEFAULT notify_on_state_change "vm_and_task_state" + iniset $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier" fi # Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS`` From 78ef1f3b2ce978191955f59fcb63892a692c7173 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Sun, 29 Sep 2013 11:36:28 +0100 Subject: [PATCH 0132/4438] XenAPI: Get the management network dynamically xenbr0 is correct for most installations, but not all. Notable xenserver-core may use a differently named device. Since we can auto detect this, remove the config and do so. Change-Id: I989f6ddd5ffb526ab350f263ef6fc402c596304a --- tools/xen/install_os_domU.sh | 5 +++++ tools/xen/xenrc | 5 +---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 110bbd998c..08e0f787b0 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -96,6 +96,10 @@ create_directory_for_images # # Configure Networking # + +MGT_NETWORK=`xe pif-list management=true params=network-uuid minimal=true` +MGT_BRIDGE_OR_NET_NAME=`xe network-list uuid=$MGT_NETWORK params=bridge minimal=true` + setup_network "$VM_BRIDGE_OR_NET_NAME" setup_network "$MGT_BRIDGE_OR_NET_NAME" setup_network "$PUB_BRIDGE_OR_NET_NAME" @@ -203,6 +207,7 @@ if [ -z "$templateuuid" ]; then # # Install Ubuntu over network # + UBUNTU_INST_BRIDGE_OR_NET_NAME=${UBUNTU_INST_BRIDGE_OR_NET_NAME:-"$MGT_BRIDGE_OR_NET_NAME"} # always update the preseed file, incase we have a newer one PRESEED_URL=${PRESEED_URL:-""} diff --git 
a/tools/xen/xenrc b/tools/xen/xenrc index f698be1085..82aa29821c 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -20,9 +20,7 @@ OSDOMU_VDI_GB=8 # differ across localised versions of XenServer. If a given bridge/network # was not found, a new network will be created with the specified name. -# The management network is specified by the bridge name. xenbr0 is usually -# the name of the bridge of the network associated with the hypervisor's eth0. -MGT_BRIDGE_OR_NET_NAME="xenbr0" +# Get the management network from the XS installation VM_BRIDGE_OR_NET_NAME="OpenStack VM Network" PUB_BRIDGE_OR_NET_NAME="OpenStack Public Network" XEN_INT_BRIDGE_OR_NET_NAME="OpenStack VM Integration Network" @@ -72,7 +70,6 @@ UBUNTU_INST_HTTP_PROXY="" UBUNTU_INST_LOCALE="en_US" UBUNTU_INST_KEYBOARD="us" # network configuration for ubuntu netinstall -UBUNTU_INST_BRIDGE_OR_NET_NAME=${UBUNTU_INST_BRIDGE_OR_NET_NAME:-"$MGT_BRIDGE_OR_NET_NAME"} UBUNTU_INST_IP="dhcp" UBUNTU_INST_NAMESERVERS="" UBUNTU_INST_NETMASK="" From 1c1aef0eb7796f0fe8b2502eb4aaa62369b7842a Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 1 Oct 2013 07:56:21 -0400 Subject: [PATCH 0133/4438] Revert "Enable tenant isolation to tempest for neutron" This reverts commit 3d84cf2d7c323750971cf2d27f3a4eaa26cb7a9f. This is believed to be the cause for the massive increase in neutron failures in the gate reseting other projects. Realize this is just a work around. 
Change-Id: Id3c59f3fe9ccbb869eb3200ef7ff2659409e2253 Partial-Bug: 1224001 --- lib/tempest | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/tempest b/lib/tempest index 646d42b8bd..bc0b18d9f4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -230,6 +230,11 @@ function configure_tempest() { # Compute iniset $TEMPEST_CONF compute change_password_available False + # Note(nati) current tempest don't create network for each tenant + # so reuse same tenant for now + if is_service_enabled neutron; then + TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-False} + fi iniset $TEMPEST_CONF compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME From 5946b57bb2b43c1690d85d6423d0a7a56565c6a4 Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Mon, 30 Sep 2013 23:48:26 +0100 Subject: [PATCH 0134/4438] Show where files are copied for sudo switch to stack user The home directory for the 'stack' user defaults to /opt/stack, which is not obvious to devstack newbies, and can also be overridden by exporting a value for DEST. Therefore it's friendlier to be explicit about the location of this home directory, to which devstack is copied before being run as the 'stack' user. 
Change-Id: Ia1941a5f2f8cf86a06681e85da52b817a855b8ff --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 03d5632495..b39cd73bb9 100755 --- a/stack.sh +++ b/stack.sh @@ -200,8 +200,8 @@ if [[ $EUID -eq 0 ]]; then ( umask 226 && echo "$STACK_USER ALL=(ALL) NOPASSWD:ALL" \ > /etc/sudoers.d/50_stack_sh ) - echo "Copying files to $STACK_USER user" STACK_DIR="$DEST/${TOP_DIR##*/}" + echo "Copying files to $STACK_DIR" cp -r -f -T "$TOP_DIR" "$STACK_DIR" safe_chown -R $STACK_USER "$STACK_DIR" cd "$STACK_DIR" From c85ade77204af724ee04f7b7d6d406e50f25ead6 Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Tue, 1 Oct 2013 00:35:16 +0100 Subject: [PATCH 0135/4438] set errexit and xtrace in helper scripts stack.sh invokes some helper scripts as separate processes, rather than by source'ing them. As with stack.sh itself, abort immediately on the first error, so that errors don't compound and result in confusing error messages. If one of these helper scripts aborts, stack.sh itself will also abort in the usual manner. Due to the change in behaviour, tweak some mv invocations to ensure that they don't trigger false failures. As with stack.sh itself, also enable xtrace so we can see exactly what's happening. In particular this allows us to see the cause of any premature termination due to a command failing whilst errexit is enabled. 
Change-Id: I7a55784c31e5395e29ab9bbe2bb112b83b9be693 --- tools/create_userrc.sh | 27 +++++++++++++++++++++------ tools/fixup_stuff.sh | 2 ++ tools/install_pip.sh | 3 +++ 3 files changed, 26 insertions(+), 6 deletions(-) diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index 619d63f7ff..44b0f6bba0 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -6,6 +6,9 @@ # Warning: This script just for development purposes +set -o errexit +set -o xtrace + ACCOUNT_DIR=./accrc display_help() @@ -138,10 +141,14 @@ s3=`keystone endpoint-get --service s3 | awk '/\|[[:space:]]*s3.publicURL/ {prin mkdir -p "$ACCOUNT_DIR" ACCOUNT_DIR=`readlink -f "$ACCOUNT_DIR"` EUCALYPTUS_CERT=$ACCOUNT_DIR/cacert.pem -mv "$EUCALYPTUS_CERT" "$EUCALYPTUS_CERT.old" &>/dev/null +if [ -e "$EUCALYPTUS_CERT" ]; then + mv "$EUCALYPTUS_CERT" "$EUCALYPTUS_CERT.old" +fi if ! nova x509-get-root-cert "$EUCALYPTUS_CERT"; then echo "Failed to update the root certificate: $EUCALYPTUS_CERT" >&2 - mv "$EUCALYPTUS_CERT.old" "$EUCALYPTUS_CERT" &>/dev/null + if [ -e "$EUCALYPTUS_CERT.old" ]; then + mv "$EUCALYPTUS_CERT.old" "$EUCALYPTUS_CERT" + fi fi @@ -168,12 +175,20 @@ function add_entry(){ local ec2_cert="$rcfile-cert.pem" local ec2_private_key="$rcfile-pk.pem" # Try to preserve the original file on fail (best effort) - mv -f "$ec2_private_key" "$ec2_private_key.old" &>/dev/null - mv -f "$ec2_cert" "$ec2_cert.old" &>/dev/null + if [ -e "$ec2_private_key" ]; then + mv -f "$ec2_private_key" "$ec2_private_key.old" + fi + if [ -e "$ec2_cert" ]; then + mv -f "$ec2_cert" "$ec2_cert.old" + fi # It will not create certs when the password is incorrect if ! 
nova --os-password "$user_passwd" --os-username "$user_name" --os-tenant-name "$tenant_name" x509-create-cert "$ec2_private_key" "$ec2_cert"; then - mv -f "$ec2_private_key.old" "$ec2_private_key" &>/dev/null - mv -f "$ec2_cert.old" "$ec2_cert" &>/dev/null + if [ -e "$ec2_private_key.old" ]; then + mv -f "$ec2_private_key.old" "$ec2_private_key" + fi + if [ -e "$ec2_cert.old" ]; then + mv -f "$ec2_cert.old" "$ec2_cert" + fi fi cat >"$rcfile" < Date: Tue, 1 Oct 2013 00:56:54 +0100 Subject: [PATCH 0136/4438] Ensure SSL CA certificates are installed for curl On openSUSE, ensure that the ca-certificates-mozilla package is installed to avoid curl aborting with curl: (60) SSL certificate problem: unable to get local issuer certificate when trying to download the pip source tarball. Change-Id: Iaf74204ea5330e9abf56f6c9d5a0f9d83992aa59 --- files/rpms-suse/general | 1 + 1 file changed, 1 insertion(+) diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 355af885d3..c8c234e54c 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -1,4 +1,5 @@ bridge-utils +ca-certificates-mozilla curl euca2ools git-core From 15aa0fc315e231ab3564eab646ca72a359964278 Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Tue, 1 Oct 2013 01:10:16 +0100 Subject: [PATCH 0137/4438] Uniquify unsupported distro error message Change a distro-not-supported error message in lib/horizon so that it can't be confused with a similar error case in lib/apache. 
Change-Id: I1197cb4de1497906e93a2c3ce09c3c06afe03b65 --- lib/horizon | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/horizon b/lib/horizon index 5973eb2a9f..f770ded42b 100644 --- a/lib/horizon +++ b/lib/horizon @@ -131,7 +131,7 @@ function init_horizon() { fi sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf else - exit_distro_not_supported "apache configuration" + exit_distro_not_supported "horizon apache configuration" fi # Remove old log files that could mess with how devstack detects whether Horizon From 3ac8612b55b9d79d214ce5a10eb37e3b017a74ad Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Tue, 1 Oct 2013 01:08:20 +0100 Subject: [PATCH 0138/4438] Don't bail when setting up horizon on openSUSE I85325179f1792d985b0375572abfe8c8a82fecc3 accidentally removed the conditional branch required to prevent setup of horizon aborting on openSUSE, so put it back in. Change-Id: Ia3e4464a2d718e402d84a0bcf60f13ef30404969 --- lib/horizon | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/horizon b/lib/horizon index 5973eb2a9f..048887ee10 100644 --- a/lib/horizon +++ b/lib/horizon @@ -130,6 +130,8 @@ function init_horizon() { HORIZON_REQUIRE='Require all granted' fi sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf + elif is_suse; then + : # nothing to do else exit_distro_not_supported "apache configuration" fi From 6d8fce732523c183fa307c6c5a685e257bdbd78a Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Tue, 1 Oct 2013 15:59:05 +0100 Subject: [PATCH 0139/4438] Fix handling of pip and virtualenv on openSUSE openSUSE's python-virtualenv rpm depends on the python-pip rpm, but tools/install_pip.sh prefers to deinstall the latter (if installed) and install pip directly from upstream source instead. This deinstallation of python-pip will break if attempted via rpm -e, since rpm does not transitively remove dependents (in this case python-virtualenv). In contrast, "zypper rm" does, so we switch to that. 
It is safe to remove the python-virtualenv package, since stack.sh will install virtualenv via pip instead. Change-Id: I5bc23de0f2de2e3940c4be3b76b7c0634836239b --- files/rpms-suse/general | 1 - functions | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 355af885d3..c5c41d7009 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -10,7 +10,6 @@ python-setuptools # instead of python-distribute; dist:sle11sp2 python-cmd2 # dist:opensuse-12.3 python-pylint python-unittest2 -python-virtualenv screen tar tcpdump diff --git a/functions b/functions index 83826f9327..bc4f05a90f 100644 --- a/functions +++ b/functions @@ -926,7 +926,7 @@ function uninstall_package() { elif is_fedora; then sudo yum remove -y "$@" elif is_suse; then - sudo rpm -e "$@" + sudo zypper rm "$@" else exit_distro_not_supported "uninstalling packages" fi From 1089b3a5f6ce7742f12842d0f1e30858cd9c1df8 Mon Sep 17 00:00:00 2001 From: Ed Cranford Date: Mon, 30 Sep 2013 11:36:55 -0500 Subject: [PATCH 0140/4438] Adds trove-conductor service to trove. 
Change-Id: Ibf14267c9a2125218c17fb34761548e339c8e784 --- lib/trove | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/lib/trove b/lib/trove index e64ca5f6ac..17c8c99835 100644 --- a/lib/trove +++ b/lib/trove @@ -109,12 +109,15 @@ function configure_trove() { # (Re)create trove conf files rm -f $TROVE_CONF_DIR/trove.conf rm -f $TROVE_CONF_DIR/trove-taskmanager.conf + rm -f $TROVE_CONF_DIR/trove-conductor.conf + iniset $TROVE_CONF_DIR/trove.conf DEFAULT rabbit_password $RABBIT_PASSWORD iniset $TROVE_CONF_DIR/trove.conf DEFAULT sql_connection `database_connection_url trove` iniset $TROVE_CONF_DIR/trove.conf DEFAULT add_addresses True iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT rabbit_password $RABBIT_PASSWORD iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT sql_connection `database_connection_url trove` + iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT control_exchange trove sed -i "s/localhost/$NETWORK_GATEWAY/g" $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample # (Re)create trove taskmanager conf file if needed @@ -127,6 +130,17 @@ function configure_trove() { iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT fi + + # (Re)create trove conductor conf file if needed + if is_service_enabled tr-cond; then + iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT sql_connection `database_connection_url trove` + iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT nova_proxy_admin_user radmin + iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT nova_proxy_admin_tenant_name trove + iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS + iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT + iniset 
$TROVE_CONF_DIR/trove-conductor.conf DEFAULT control_exchange trove + fi } # install_troveclient() - Collect source and prepare @@ -152,12 +166,13 @@ function init_trove() { function start_trove() { screen_it tr-api "cd $TROVE_DIR; bin/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1" screen_it tr-tmgr "cd $TROVE_DIR; bin/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1" + screen_it tr-cond "cd $TROVE_DIR; bin/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug 2>&1" } # stop_trove() - Stop running processes function stop_trove() { # Kill the trove screen windows - for serv in tr-api tr-tmgr; do + for serv in tr-api tr-tmgr tr-cond; do screen -S $SCREEN_NAME -p $serv -X kill done } From 87acc91fc67dd2c349008aad9a4f6c1770f3eb7e Mon Sep 17 00:00:00 2001 From: Joe Mills Date: Tue, 1 Oct 2013 08:13:06 +0000 Subject: [PATCH 0141/4438] Add MIDONET settings to dhcp.ini The midonet specific settings were not being added to the dhcp specific config file. This change adds those settings. 
Closes-bug: #1233941 Change-Id: I4155135528c6ba77cf57d30ac256580c7239794f Signed-off-by: Joe Mills --- lib/neutron_plugins/midonet | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index 0ad760b289..f09c67527e 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -37,6 +37,18 @@ function neutron_plugin_configure_dhcp_agent() { iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver $DHCP_INTERFACE_DRIVER iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces True iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True + if [[ "$MIDONET_API_URI" != "" ]]; then + iniset $Q_DHCP_CONF_FILE MIDONET midonet_uri "$MIDONET_API_URI" + fi + if [[ "$MIDONET_USERNAME" != "" ]]; then + iniset $Q_DHCP_CONF_FILE MIDONET username "$MIDONET_USERNAME" + fi + if [[ "$MIDONET_PASSWORD" != "" ]]; then + iniset $Q_DHCP_CONF_FILE MIDONET password "$MIDONET_PASSWORD" + fi + if [[ "$MIDONET_PROJECT_ID" != "" ]]; then + iniset $Q_DHCP_CONF_FILE MIDONET project_id "$MIDONET_PROJECT_ID" + fi } function neutron_plugin_configure_l3_agent() { From bfb880d547d03e8eb2230b9c9ad6baf374f2d3c3 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 2 Oct 2013 17:44:18 +0100 Subject: [PATCH 0142/4438] xenapi: increase default memory to 3G Devstack was swapping with 2G Change-Id: I8fe77591cb0ca0f946028d7219b43d77eea3419f --- tools/xen/xenrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/xenrc b/tools/xen/xenrc index f698be1085..6372ea7faa 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -13,7 +13,7 @@ CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false} # Size of image VDI_MB=${VDI_MB:-5000} -OSDOMU_MEM_MB=2048 +OSDOMU_MEM_MB=3072 OSDOMU_VDI_GB=8 # Network mapping. Specify bridge names or network names. 
Network names may From 96ba6ec1bf0b7cc54f9968e4cc3aa80b8f2c368e Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 2 Oct 2013 11:08:56 -0700 Subject: [PATCH 0143/4438] Change DATABASE_HOST default to 127.0.0.1 Attempt to fix what is suspected to be a DNS resolution issue with postgresql check job. Closes-Bug: #1232748 Change-Id: Ic82e54b2af038e6c21d4f026f3da10f34c3c185c --- lib/database | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/database b/lib/database index 442ed56fbe..3c1560964c 100644 --- a/lib/database +++ b/lib/database @@ -64,7 +64,7 @@ function initialize_database_backends { # For backward-compatibility, read in the MYSQL_HOST/USER variables and use # them as the default values for the DATABASE_HOST/USER variables. - MYSQL_HOST=${MYSQL_HOST:-localhost} + MYSQL_HOST=${MYSQL_HOST:-127.0.0.1} MYSQL_USER=${MYSQL_USER:-root} DATABASE_HOST=${DATABASE_HOST:-${MYSQL_HOST}} From 49f4486f1caff209254f560deecd774246c91c79 Mon Sep 17 00:00:00 2001 From: Ryan Hsu Date: Thu, 3 Oct 2013 22:27:03 -0700 Subject: [PATCH 0144/4438] Fix typo in property passed to glance In the upload_image function, a property that is passed to glance in the vmdk conditional block has a typo. 
Changes "vmware-disktype" to "vmware_disktype" (dash to underscore) Change-Id: I6c4e1875b6ab4544f9742ab08893dae0e86965a0 Closes-Bug: #1235080 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index d9445fe6e7..cc5089f55c 100644 --- a/functions +++ b/functions @@ -1335,7 +1335,7 @@ function upload_image() { vmdk_net_adapter="${props[2]}" fi - glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format bare --disk-format vmdk --property vmware-disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${IMAGE}" + glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${IMAGE}" return fi From e6024413ae69bd0ec2abefe613b850680047a09c Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 15 Sep 2013 18:38:48 +0200 Subject: [PATCH 0145/4438] lib/swift variable changes and dd replaced by truncate - ${SWIFT_DATA_DIR}/drives/images/swift.img replaced by ${SWIFT_DISK_IMAGE}. - using truncate -s command instead of dd over seeking Change-Id: I0dd29af3247ba7819ef0c74775412074b6b62017 --- lib/swift | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/lib/swift b/lib/swift index 9c80802ba9..de52576e64 100644 --- a/lib/swift +++ b/lib/swift @@ -39,6 +39,7 @@ SWIFT3_DIR=$DEST/swift3 # Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects. # Default is the common DevStack data directory. SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift} +SWIFT_DISK_IMAGE=${SWIFT_DATA_DIR}/drives/images/swift.img # Set ``SWIFT_CONF_DIR`` to the location of the configuration files. 
# Default is ``/etc/swift``. @@ -55,10 +56,10 @@ fi # swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in # kilobytes. # Default is 1 gigabyte. -SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=1048576 +SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=1G # if tempest enabled the default size is 4 Gigabyte. if is_service_enabled tempest; then - SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=${SWIFT_LOOPBACK_DISK_SIZE:-4194304} + SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=${SWIFT_LOOPBACK_DISK_SIZE:-4G} fi SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_DEFAULT} @@ -103,8 +104,8 @@ function cleanup_swift() { if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 fi - if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then - rm ${SWIFT_DATA_DIR}/drives/images/swift.img + if [[ -e ${SWIFT_DISK_IMAGE} ]]; then + rm ${SWIFT_DISK_IMAGE} fi rm -rf ${SWIFT_DATA_DIR}/run/ if is_apache_enabled_service swift; then @@ -409,28 +410,27 @@ function create_swift_disk() { sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR} # Create a loopback disk and format it to XFS. 
- if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then + if [[ -e ${SWIFT_DISK_IMAGE} ]]; then if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 - sudo rm -f ${SWIFT_DATA_DIR}/drives/images/swift.img + sudo rm -f ${SWIFT_DISK_IMAGE} fi fi mkdir -p ${SWIFT_DATA_DIR}/drives/images - sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img - sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img + sudo touch ${SWIFT_DISK_IMAGE} + sudo chown $USER: ${SWIFT_DISK_IMAGE} - dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \ - bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} + truncate -s ${SWIFT_LOOPBACK_DISK_SIZE} ${SWIFT_DISK_IMAGE} # Make a fresh XFS filesystem - mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img + mkfs.xfs -f -i size=1024 ${SWIFT_DISK_IMAGE} # Mount the disk with mount options to make it as efficient as possible mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1 if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ - ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1 + ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 fi # Create a link to the above mount and From 9f878cbe6dcbd26e756546c1fc7a97994c7a311d Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Fri, 4 Oct 2013 09:56:24 +0100 Subject: [PATCH 0146/4438] xenapi: max out VCPU count Give as much VCPUs to the DevStack machine as possible. First asking xenapi about its CPU count, and as a fallback, count the CPUs in dom0. This should result in faster test runs. 
Change-Id: I1ffb99ecd435f1d7eb5754fe9cd99f0e8ceae6dc --- tools/xen/functions | 32 ++++++++++++++++++++++++++++++++ tools/xen/install_os_domU.sh | 3 +++ 2 files changed, 35 insertions(+) diff --git a/tools/xen/functions b/tools/xen/functions index a5c4b70bc3..c65d919e3f 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -287,3 +287,35 @@ function set_vm_memory() { dynamic-max=${memory}MiB \ uuid=$vm } + +function max_vcpus() { + local vm_name_label + + vm_name_label="$1" + + local vm + local host + local cpu_count + + host=$(xe host-list --minimal) + vm=$(_vm_uuid "$vm_name_label") + + cpu_count=$(xe host-param-get \ + param-name=cpu_info \ + uuid=$host | + sed -e 's/^.*cpu_count: \([0-9]*\);.*$/\1/g') + + if [ -z "$cpu_count" ]; then + # get dom0's vcpu count + cpu_count=$(cat /proc/cpuinfo | grep processor | wc -l) + fi + + # Assert cpu_count is not empty + [ -n "$cpu_count" ] + + # Assert ithas a numeric nonzero value + expr "$cpu_count" + 0 + + xe vm-param-set uuid=$vm VCPUs-max=$cpu_count + xe vm-param-set uuid=$vm VCPUs-at-startup=$cpu_count +} diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 110bbd998c..e69cdea04f 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -263,6 +263,9 @@ $THIS_DIR/prepare_guest_template.sh "$GUEST_NAME" # Set virtual machine parameters set_vm_memory "$GUEST_NAME" "$OSDOMU_MEM_MB" +# Max out VCPU count for better performance +max_vcpus "$GUEST_NAME" + # start the VM to run the prepare steps xe vm-start vm="$GUEST_NAME" From a2fd222ee976e93898a66372ef764b7756724321 Mon Sep 17 00:00:00 2001 From: Joe Mills Date: Fri, 4 Oct 2013 11:46:10 +0000 Subject: [PATCH 0147/4438] Change Midonet vif driver to generic Use generic vif driver for Midonet to support port bindings through the mm-ctl script. 
Change-Id: Iddc8a1c7b0128a76cd778c0245f2098bfb2c0145 Closes-Bug: 1235202 --- lib/neutron_plugins/midonet | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index 0ad760b289..193055f7db 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -12,7 +12,7 @@ function is_neutron_ovs_base_plugin() { } function neutron_plugin_create_nova_conf() { - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"midonet.nova.virt.libvirt.vif.MidonetVifDriver"} + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} } function neutron_plugin_install_agent_packages() { From ca5af8615e58b78dbb0242074bc35aec5de1dda5 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 4 Oct 2013 13:33:07 -0500 Subject: [PATCH 0148/4438] Remove general assumption in get_packages() get_packages() always included 'general' as a default 'service' file. Remove this assumption and add it explicitly to the primary package installation call. This allows get_package() to be used in other places where 'general' is not desired to be included. Change-Id: I1eed4386d073d6ae9534aedae32654208c6662e8 --- functions | 4 ++-- tools/install_prereqs.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/functions b/functions index d9445fe6e7..6aee24008c 100644 --- a/functions +++ b/functions @@ -248,7 +248,7 @@ function _get_package_dir() { # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection # of the package to the distros listed. The distro names are case insensitive. 
function get_packages() { - local services=$1 + local services=$@ local package_dir=$(_get_package_dir) local file_to_parse local service @@ -260,7 +260,7 @@ function get_packages() { if [[ -z "$DISTRO" ]]; then GetDistro fi - for service in general ${services//,/ }; do + for service in ${services//,/ }; do # Allow individual services to specify dependencies if [[ -e ${package_dir}/${service} ]]; then file_to_parse="${file_to_parse} $service" diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index 68f11ce35e..0c65fd9b00 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -55,7 +55,7 @@ export_proxy_variables # ================ # Install package requirements -install_package $(get_packages $ENABLED_SERVICES) +install_package $(get_packages general $ENABLED_SERVICES) if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then if is_ubuntu || is_fedora; then From 23f69d83e5564ece0308535117cc6d224fcc3557 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 4 Oct 2013 12:35:24 -0500 Subject: [PATCH 0149/4438] Split the creation of $STACK_USER account out of stack.sh Automatically creating a new user account is not always the right course of action when stack.sh is running as root. Plus, the re-exec did not work correctly in some cases. * Create tools/create-stack-user.sh to set up a suitable user for running DevStack * Abort stack.sh and unstack.sh if running as root and suggest creating a suitable user account. 
Change-Id: I5d967c00c89f32e861449234ea8fe19261cd9ae3 --- README.md | 8 +++- stack.sh | 80 ++++++++++++-------------------------- tools/create-stack-user.sh | 49 +++++++++++++++++++++++ unstack.sh | 6 +++ 4 files changed, 87 insertions(+), 56 deletions(-) create mode 100644 tools/create-stack-user.sh diff --git a/README.md b/README.md index 99e983887e..6dc9ecd1e3 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ You can also pick specific OpenStack project releases by setting the appropriate # Start A Dev Cloud -Installing in a dedicated disposable vm is safer than installing on your dev machine! To start a dev cloud: +Installing in a dedicated disposable vm is safer than installing on your dev machine! Plus you can pick one of the supported Linux distros for your VM. To start a dev cloud run the following NOT AS ROOT (see below for more): ./stack.sh @@ -57,6 +57,12 @@ If the EC2 API is your cup-o-tea, you can create credentials and use euca2ools: # list instances using ec2 api euca-describe-instances +# DevStack Execution Environment + +DevStack runs rampant over the system it runs on, installing things and uninstalling other things. Running this on a system you care about is a recipe for disappointment, or worse. Alas, we're all in the virtualization business here, so run it in a VM. And take advantage of the snapshot capabilities of your hypervisor of choice to reduce testing cycle times. You might even save enough time to write one more feature before the next feature freeze... + +``stack.sh`` needs to have root access for a lot of tasks, but it also needs to have not-root permissions for most of its work and for all of the OpenStack services. So ``stack.sh`` specifically does not run if you are root. This is a recent change (Oct 2013) from the previous behaviour of automatically creating a ``stack`` user. 
Automatically creating a user account is not always the right response to running as root, so that bit is now an explicit step using ``tools/create-stack-user.sh``. Run that (as root!) if you do not want to just use your normal login here, which works perfectly fine. + # Customizing You can override environment variables used in `stack.sh` by creating file name `localrc`. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host. diff --git a/stack.sh b/stack.sh index b39cd73bb9..86fe82a584 100755 --- a/stack.sh +++ b/stack.sh @@ -172,67 +172,37 @@ fi # ----------- # OpenStack is designed to be run as a non-root user; Horizon will fail to run -# as **root** since Apache will not serve content from **root** user). If -# ``stack.sh`` is run as **root**, it automatically creates a **stack** user with -# sudo privileges and runs as that user. +# as **root** since Apache will not serve content from **root** user). +# ``stack.sh`` must not be run as **root**. It aborts and suggests one course of +# action to create a suitable user account. if [[ $EUID -eq 0 ]]; then - ROOTSLEEP=${ROOTSLEEP:-10} echo "You are running this script as root." - echo "In $ROOTSLEEP seconds, we will create a user '$STACK_USER' and run as that user" - sleep $ROOTSLEEP - - # Give the non-root user the ability to run as **root** via ``sudo`` - is_package_installed sudo || install_package sudo - if ! getent group $STACK_USER >/dev/null; then - echo "Creating a group called $STACK_USER" - groupadd $STACK_USER - fi - if ! 
getent passwd $STACK_USER >/dev/null; then - echo "Creating a user called $STACK_USER" - useradd -g $STACK_USER -s /bin/bash -d $DEST -m $STACK_USER - fi - - echo "Giving stack user passwordless sudo privileges" - # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one - grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || - echo "#includedir /etc/sudoers.d" >> /etc/sudoers - ( umask 226 && echo "$STACK_USER ALL=(ALL) NOPASSWD:ALL" \ - > /etc/sudoers.d/50_stack_sh ) - - STACK_DIR="$DEST/${TOP_DIR##*/}" - echo "Copying files to $STACK_DIR" - cp -r -f -T "$TOP_DIR" "$STACK_DIR" - safe_chown -R $STACK_USER "$STACK_DIR" - cd "$STACK_DIR" - if [[ "$SHELL_AFTER_RUN" != "no" ]]; then - exec sudo -u $STACK_USER bash -l -c "set -e; bash stack.sh; bash" - else - exec sudo -u $STACK_USER bash -l -c "set -e; source stack.sh" - fi + echo "Cut it out." + echo "Really." + echo "If you need an account to run DevStack, do this (as root, heh) to create $STACK_USER:" + echo "$TOP_DIR/tools/create-stack-user.sh" exit 1 -else - # We're not **root**, make sure ``sudo`` is available - is_package_installed sudo || die "Sudo is required. Re-run stack.sh as root ONE TIME ONLY to set up sudo." 
- - # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one - sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || - echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers - - # Set up devstack sudoers - TEMPFILE=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE - # Some binaries might be under /sbin or /usr/sbin, so make sure sudo will - # see them by forcing PATH - echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE - chmod 0440 $TEMPFILE - sudo chown root:root $TEMPFILE - sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh - - # Remove old file - sudo rm -f /etc/sudoers.d/stack_sh_nova fi +# We're not **root**, make sure ``sudo`` is available +is_package_installed sudo || install_package sudo + +# UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one +sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || + echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers + +# Set up devstack sudoers +TEMPFILE=`mktemp` +echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE +# Some binaries might be under /sbin or /usr/sbin, so make sure sudo will +# see them by forcing PATH +echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE +chmod 0440 $TEMPFILE +sudo chown root:root $TEMPFILE +sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh + + # Create the destination directory and ensure it is writable by the user # and read/executable by everybody for daemons (e.g. 
apache run for horizon) sudo mkdir -p $DEST diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh new file mode 100644 index 0000000000..2251d1e67c --- /dev/null +++ b/tools/create-stack-user.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +# **create-stack-user.sh** + +# Create a user account suitable for running DevStack +# - create a group named $STACK_USER if it does not exist +# - create a user named $STACK_USER if it does not exist +# - home is $DEST +# - configure sudo for $STACK_USER + +# ``stack.sh`` was never intended to run as root. It had a hack to do what is +# now in this script and re-launch itself, but that hack was less than perfect +# and it was time for this nonsense to stop. Run this script as root to create +# the user and configure sudo. + + +# Keep track of the devstack directory +TOP_DIR=$(cd $(dirname "$0")/.. && pwd) + +# Import common functions +source $TOP_DIR/functions + +# Determine what system we are running on. This provides ``os_VENDOR``, +# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` +# and ``DISTRO`` +GetDistro + +# Needed to get ``ENABLED_SERVICES`` +source $TOP_DIR/stackrc + +# Give the non-root user the ability to run as **root** via ``sudo`` +is_package_installed sudo || install_package sudo + +if ! getent group $STACK_USER >/dev/null; then + echo "Creating a group called $STACK_USER" + groupadd $STACK_USER +fi + +if ! 
getent passwd $STACK_USER >/dev/null; then + echo "Creating a user called $STACK_USER" + useradd -g $STACK_USER -s /bin/bash -d $DEST -m $STACK_USER +fi + +echo "Giving stack user passwordless sudo privileges" +# UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one +grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || + echo "#includedir /etc/sudoers.d" >> /etc/sudoers +( umask 226 && echo "$STACK_USER ALL=(ALL) NOPASSWD:ALL" \ + > /etc/sudoers.d/50_stack_sh ) diff --git a/unstack.sh b/unstack.sh index 05d9fb7c83..c944ccc0fb 100755 --- a/unstack.sh +++ b/unstack.sh @@ -24,6 +24,12 @@ source $TOP_DIR/stackrc # Destination path for service data DATA_DIR=${DATA_DIR:-${DEST}/data} +if [[ $EUID -eq 0 ]]; then + echo "You are running this script as root." + echo "It might work but you will have a better day running it as $STACK_USER" + exit 1 +fi + # Import apache functions source $TOP_DIR/lib/apache From d903476aa5270df703bd22bb58fed2c740042fbf Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Fri, 4 Oct 2013 23:20:24 +0100 Subject: [PATCH 0150/4438] Fix "instal_prereqs.sh" typo Change-Id: I745b159aea70412d424df506af0e3d1ca2d78034 --- HACKING.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/HACKING.rst b/HACKING.rst index dd665a2304..5f33d770f8 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -38,7 +38,7 @@ such as Grenade, to manage a DevStack installation. A number of additional scripts can be found in the ``tools`` directory that may be useful in supporting DevStack installations. Of particular note are ``info.sh`` -to collect and report information about the installed system, and ``instal_prereqs.sh`` +to collect and report information about the installed system, and ``install_prereqs.sh`` that handles installation of the prerequisite packages for DevStack. It is suitable, for example, to pre-load a system for making a snapshot. 
From 46ea7238682642990ef67dd73582e86a2d4e2a2d Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 7 Oct 2013 07:29:27 +0200 Subject: [PATCH 0151/4438] install_pip script fails if pip was not installed 'set -o errexit' recently added to the pip installer script, which causes the script fail when it does not able to find an already installed pip. This change handles the situation when pip is not installed. Change-Id: I18a42d13c4be6699db21ec5b6a095a88a199912d --- tools/install_pip.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 04e18261ac..940bd8c84a 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -50,10 +50,12 @@ GetDistro echo "Distro: $DISTRO" function get_versions() { - PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null) + PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || true) if [[ -n $PIP ]]; then PIP_VERSION=$($PIP --version | awk '{ print $2}') echo "pip: $PIP_VERSION" + else + echo "pip: Not Installed" fi } From ec0ff2acf8d0f58c3e2750cd94a1eb9949bcdad8 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Wed, 25 Sep 2013 17:29:58 -0700 Subject: [PATCH 0152/4438] Specify agent mode and service cluster uuid for nicira plugin Supports blueprint nsx-integrated-services Change-Id: Ib02716fe447f1d7f47f2f49d16f0d2ad7afe741f --- lib/neutron_plugins/nicira | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/neutron_plugins/nicira b/lib/neutron_plugins/nicira index e9deb64e11..ca89d57fe7 100644 --- a/lib/neutron_plugins/nicira +++ b/lib/neutron_plugins/nicira @@ -119,6 +119,16 @@ function neutron_plugin_configure_service() { if [[ "$NVP_REDIRECTS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT redirects $NVP_REDIRECTS fi + if [[ "$AGENT_MODE" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE nvp agent_mode $AGENT_MODE + if [[ "$AGENT_MODE" == "agentless" ]]; then + if [[ "$DEFAULT_SERVICE_CLUSTER_UUID" != "" ]]; then + iniset 
/$Q_PLUGIN_CONF_FILE DEFAULT default_service_cluster_uuid $DEFAULT_SERVICE_CLUSTER_UUID + else + die $LINENO "Agentless mode requires a service cluster." + fi + fi + fi } function neutron_plugin_setup_interface_driver() { From 976e418a037df3621cf15dfc15df68e2095b28c0 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Wed, 2 Oct 2013 12:59:07 -0700 Subject: [PATCH 0153/4438] Specify ip address for nova metadata server for nicira plugin Supports blueprint nsx-integrated-services Change-Id: I265b9714ca531731b0b2e1b37e64c912666aed80 --- lib/neutron_plugins/nicira | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/neutron_plugins/nicira b/lib/neutron_plugins/nicira index ca89d57fe7..082c84674d 100644 --- a/lib/neutron_plugins/nicira +++ b/lib/neutron_plugins/nicira @@ -127,6 +127,7 @@ function neutron_plugin_configure_service() { else die $LINENO "Agentless mode requires a service cluster." fi + iniset /$Q_PLUGIN_CONF_FILE nvp_metadata metadata_server_address $Q_META_DATA_IP fi fi } From 9732b57e3de7c24cb494c0f923d791a782ca9c9a Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Sat, 21 Sep 2013 01:17:06 +0200 Subject: [PATCH 0154/4438] Nicira plugin: do not die if NVP gateway IP is missing Devstack should not die if the IP and prefix len for establishing a connection to the public network are not provided. In this case, the public gateway IP address used to configure Neutron's public network should be used, together with the prefix length of the public network's CIDR. This patch also ensures $PUBLIC_BRIDGE is created, even if Q_USE_DEBUG_COMMAND is disabled. Finally this patch also adds the teardown operation for restoring the original IP addresses on the interface used for connectivity to the public network implemented on the NVP gateway. 
Bug #1227750 Change-Id: Ib58738a578c46f2183d503cabfdc6039bfbeb702 --- lib/neutron_thirdparty/nicira | 38 +++++++++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/lib/neutron_thirdparty/nicira b/lib/neutron_thirdparty/nicira index 5a20934a1b..3f2a5af11f 100644 --- a/lib/neutron_thirdparty/nicira +++ b/lib/neutron_thirdparty/nicira @@ -18,22 +18,38 @@ set +o xtrace # to an network that allows it to talk to the gateway for # testing purposes NVP_GATEWAY_NETWORK_INTERFACE=${NVP_GATEWAY_NETWORK_INTERFACE:-eth2} +# Re-declare floating range as it's needed also in stop_nicira, which +# is invoked by unstack.sh +FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28} function configure_nicira() { : } function init_nicira() { - die_if_not_set $LINENO NVP_GATEWAY_NETWORK_CIDR "Please, specify CIDR for the gateway network interface." + if ! is_set NVP_GATEWAY_NETWORK_CIDR; then + NVP_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} + echo "The IP address to set on br-ex was not specified. 
" + echo "Defaulting to "$NVP_GATEWAY_NETWORK_CIDR + fi # Make sure the interface is up, but not configured - sudo ifconfig $NVP_GATEWAY_NETWORK_INTERFACE up + sudo ip link dev $NVP_GATEWAY_NETWORK_INTERFACE set up + # Save and then flush the IP addresses on the interface + addresses=$(ip addr show dev $NVP_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'}) sudo ip addr flush $NVP_GATEWAY_NETWORK_INTERFACE # Use the PUBLIC Bridge to route traffic to the NVP gateway # NOTE(armando-migliaccio): if running in a nested environment this will work # only with mac learning enabled, portsecurity and security profiles disabled + # The public bridge might not exist for the NVP plugin if Q_USE_DEBUG_COMMAND is off + # Try to create it anyway + sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_INTERFACE nvp_gw_net_if_mac=$(ip link show $NVP_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}') - sudo ifconfig $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_CIDR hw ether $nvp_gw_net_if_mac + sudo ip link dev $PUBLIC_BRIDGE set address $nvp_gw_net_if_mac + for address in $addresses; do + sudo ip addr add dev $PUBLIC_BRIDGE $address + done + sudo ip addr add dev $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_CIDR } function install_nicira() { @@ -45,7 +61,21 @@ function start_nicira() { } function stop_nicira() { - : + if ! is_set NVP_GATEWAY_NETWORK_CIDR; then + NVP_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} + echo "The IP address expected on br-ex was not specified. 
" + echo "Defaulting to "$NVP_GATEWAY_NETWORK_CIDR + fi + sudo ip addr del $NVP_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE + # Save and then flush remaining addresses on the interface + addresses=$(ip addr show dev $PUBLIC_BRIDGE | grep inet | awk {'print $2'}) + sudo ip addr flush $PUBLIC_BRIDGE + # Try to detach physical interface from PUBLIC_BRIDGE + sudo ovs-vsctl del-port $NVP_GATEWAY_NETWORK_INTERFACE + # Restore addresses on NVP_GATEWAY_NETWORK_INTERFACE + for address in $addresses; do + sudo ip addr add dev $NVP_GATEWAY_NETWORK_INTERFACE $address + done } # Restore xtrace From 4897ff55d77cd957c57f9717785d12f86cd5b824 Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Wed, 9 Oct 2013 18:49:32 +0900 Subject: [PATCH 0155/4438] Correct logging_context_format_string for Heat We should use "tenant" and "user" instead of "project_name" and "user_name" by calling setup_colorized_logging with these parameters. Change-Id: I47820c890bf4585e7c8f64c41f48d7576ca56862 Closes-Bug: 1237314 --- lib/heat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index ff9473ecdb..8acadb4ad1 100644 --- a/lib/heat +++ b/lib/heat @@ -86,7 +86,7 @@ function configure_heat() { iniset $HEAT_CONF DEFAULT use_syslog $SYSLOG if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output - setup_colorized_logging $HEAT_CONF DEFAULT + setup_colorized_logging $HEAT_CONF DEFAULT tenant user fi # keystone authtoken From b7fcf3f6c0b41bbba16dd52d124711e8e2b8bc9d Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Thu, 10 Oct 2013 17:56:21 +0900 Subject: [PATCH 0156/4438] Update diskimage-builder's URL diskimage-builder has moved from stackforge to openstack. 
Change-Id: I5bc8d5d162d7d671e062efd67992f15fbb4307b2 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 3a338d16f2..5c0baf7da9 100644 --- a/stackrc +++ b/stackrc @@ -160,7 +160,7 @@ TEMPEST_BRANCH=${TEMPEST_BRANCH:-master} # diskimage-builder -BM_IMAGE_BUILD_REPO=${BM_IMAGE_BUILD_REPO:-${GIT_BASE}/stackforge/diskimage-builder.git} +BM_IMAGE_BUILD_REPO=${BM_IMAGE_BUILD_REPO:-${GIT_BASE}/openstack/diskimage-builder.git} BM_IMAGE_BUILD_BRANCH=${BM_IMAGE_BUILD_BRANCH:-master} # bm_poseur From 84783c72fe31dbc7656cfb4b9ee0af947e5ce3ed Mon Sep 17 00:00:00 2001 From: Sergey Kraynev Date: Thu, 10 Oct 2013 09:08:48 -0400 Subject: [PATCH 0157/4438] Adding value for lock_path in configuration file Now oslo code include new lockutils. According this code if lock_path is not set in configuration file, will be raised Error message. So for updating lockutils in cinder project is needed lock_path definition in configuration file. Change-Id: I413f0a2ccec0f9d9e06acaa8cc06c41206d9dcc2 --- lib/cinder | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/cinder b/lib/cinder index ccf38b4dea..220488a07e 100644 --- a/lib/cinder +++ b/lib/cinder @@ -233,6 +233,7 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf" iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.contrib.standard_extensions iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH + iniset $CINDER_CONF DEFAULT lock_path $CINDER_STATE_PATH iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL if is_service_enabled ceilometer; then From 3931573f2bdb542ff4299bd548cab3458c3b0c99 Mon Sep 17 00:00:00 2001 From: DennyZhang Date: Fri, 11 Oct 2013 00:12:22 -0500 Subject: [PATCH 0158/4438] mute useless console output, when run ./stack.sh Run ./stack.sh will dump ~400 lines of information, because of tar xvfz pip-*.tar.gz, and python setup.py install. 
We'd better mute stdout for the two steps, to make console cleaner Change-Id: Icf87947e020acb48d8cbe4cdcc1641f060e50f6d --- tools/install_pip.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 940bd8c84a..455323e6fa 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -72,9 +72,9 @@ function install_get_pip() { function install_pip_tarball() { (cd $FILES; \ curl -O $PIP_TAR_URL; \ - tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz; \ + tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz 1>/dev/null; \ cd pip-$INSTALL_PIP_VERSION; \ - sudo python setup.py install; \ + sudo python setup.py install 1>/dev/null; \ ) } From af15d35414abea1e0dd9792d3fffcffab47afc1c Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Fri, 11 Oct 2013 21:56:56 +0900 Subject: [PATCH 0159/4438] baremetal: Allow BM_SECOND_MAC to be unset Currently DevStack registers the second nic of the baremetal node even if BM_SECOND_MAC is not set or empty. However an interface with an empty mac address causes dhcp to fail (bug 1238595). And such operation will get to return a error after the bug is resolved. So we should not register the second nic if BM_SECOND_MAC is not set. Related-Bug: #1238595 Change-Id: Ib3cc77686b72311403ccacbd70ae9cf43e6eb4c9 --- lib/baremetal | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index 52af420853..f4d8589628 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -449,8 +449,10 @@ function add_baremetal_node() { "$mac_1" \ | grep ' id ' | get_field 2 ) [ $? -eq 0 ] || [ "$id" ] || die $LINENO "Error adding baremetal node" - id2=$(nova baremetal-interface-add "$id" "$mac_2" ) - [ $? -eq 0 ] || [ "$id2" ] || die $LINENO "Error adding interface to barmetal node $id" + if [ -n "$mac_2" ]; then + id2=$(nova baremetal-interface-add "$id" "$mac_2" ) + [ $? 
-eq 0 ] || [ "$id2" ] || die $LINENO "Error adding interface to barmetal node $id" + fi } From d5644f8b4f56b1aef0efc6ae869029df494c0a93 Mon Sep 17 00:00:00 2001 From: Florent Flament Date: Fri, 11 Oct 2013 15:39:09 +0200 Subject: [PATCH 0160/4438] Updates samples/localrc comment The SWIFT_DATA_DIR default value stated in the last comment of samples/localrc has been updated to match actual SWIFT_DATA_DIR default value ($DEST/data/swift instead of $DEST/swift/data). Addresses Bug: #1238665 Change-Id: I2510f72eb3eda467799202b356abb606930f4d94 --- samples/localrc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/samples/localrc b/samples/localrc index fd7221a0ae..80cf0e75ac 100644 --- a/samples/localrc +++ b/samples/localrc @@ -83,7 +83,8 @@ SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5 # Set this to 1 to save some resources: SWIFT_REPLICAS=1 -# The data for Swift is stored in the source tree by default (``$DEST/swift/data``) -# and can be moved by setting ``SWIFT_DATA_DIR``. The directory will be created +# The data for Swift is stored by default in (``$DEST/data/swift``), +# or (``$DATA_DIR/swift``) if ``DATA_DIR`` has been set, and can be +# moved by setting ``SWIFT_DATA_DIR``. The directory will be created # if it does not exist. SWIFT_DATA_DIR=$DEST/data From 8c032d1635320ad9b5162136a8876cc48e7fa8bd Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 23 Sep 2013 13:53:13 -0500 Subject: [PATCH 0161/4438] Convert remaining hypervisors to plugin model Convert BareMetal, OpenVZ, PowerVM hnd libvirt ypervisor configurations in Nova to the new plugin setup. 
Change-Id: I47d744a2c9fdda0771f5b473ec3b26fb099f7439 --- lib/nova | 153 ++++-------------------- lib/nova_plugins/hypervisor-baremetal | 93 +++++++++++++++ lib/nova_plugins/hypervisor-libvirt | 165 ++++++++++++++++++++++++++ lib/nova_plugins/hypervisor-openvz | 67 +++++++++++ lib/nova_plugins/hypervisor-powervm | 76 ++++++++++++ stack.sh | 95 +-------------- 6 files changed, 426 insertions(+), 223 deletions(-) create mode 100644 lib/nova_plugins/hypervisor-baremetal create mode 100644 lib/nova_plugins/hypervisor-libvirt create mode 100644 lib/nova_plugins/hypervisor-openvz create mode 100644 lib/nova_plugins/hypervisor-powervm diff --git a/lib/nova b/lib/nova index 4c5520785f..8deb3a01a9 100644 --- a/lib/nova +++ b/lib/nova @@ -71,23 +71,24 @@ QEMU_CONF=/etc/libvirt/qemu.conf NOVNC_DIR=$DEST/noVNC SPICE_DIR=$DEST/spice-html5 +# Set default defaults here as some hypervisor drivers override these +PUBLIC_INTERFACE_DEFAULT=br100 +GUEST_INTERFACE_DEFAULT=eth0 +FLAT_NETWORK_BRIDGE_DEFAULT=br100 + +# Get hypervisor configuration +# ---------------------------- + +NOVA_PLUGINS=$TOP_DIR/lib/nova_plugins +if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + # Load plugin + source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER +fi + # Nova Network Configuration # -------------------------- -# Set defaults according to the virt driver -if [ "$VIRT_DRIVER" = 'baremetal' ]; then - NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager} - PUBLIC_INTERFACE_DEFAULT=eth0 - FLAT_INTERFACE=${FLAT_INTERFACE:-eth0} - FLAT_NETWORK_BRIDGE_DEFAULT=br100 - STUB_NETWORK=${STUB_NETWORK:-False} -else - PUBLIC_INTERFACE_DEFAULT=br100 - GUEST_INTERFACE_DEFAULT=eth0 - FLAT_NETWORK_BRIDGE_DEFAULT=br100 -fi - NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}} PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT} VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT} @@ -274,83 +275,6 @@ function configure_nova() { fi fi - # Prepare directories and packages 
for baremetal driver - if is_baremetal; then - configure_baremetal_nova_dirs - fi - - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then - # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces - cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla -[libvirt Management Access] -Identity=unix-group:$LIBVIRT_GROUP -Action=org.libvirt.unix.manage -ResultAny=yes -ResultInactive=yes -ResultActive=yes -EOF" - elif is_suse && [[ $os_RELEASE = 12.2 || "$os_VENDOR" = "SUSE LINUX" ]]; then - # openSUSE < 12.3 or SLE - # Work around the fact that polkit-default-privs overrules pklas - # with 'unix-group:$group'. - sudo bash -c "cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla -[libvirt Management Access] -Identity=unix-user:$USER -Action=org.libvirt.unix.manage -ResultAny=yes -ResultInactive=yes -ResultActive=yes -EOF" - else - # Starting with fedora 18 and opensuse-12.3 enable stack-user to - # virsh -c qemu:///system by creating a policy-kit rule for - # stack-user using the new Javascript syntax - rules_dir=/etc/polkit-1/rules.d - sudo mkdir -p $rules_dir - sudo bash -c "cat < $rules_dir/50-libvirt-$STACK_USER.rules -polkit.addRule(function(action, subject) { - if (action.id == 'org.libvirt.unix.manage' && - subject.user == '"$STACK_USER"') { - return polkit.Result.YES; - } -}); -EOF" - unset rules_dir - fi - fi - - # The user that nova runs as needs to be member of **libvirtd** group otherwise - # nova-compute will be unable to use libvirt. - if ! getent group $LIBVIRT_GROUP >/dev/null; then - sudo groupadd $LIBVIRT_GROUP - fi - add_user_to_group $STACK_USER $LIBVIRT_GROUP - - # libvirt detects various settings on startup, as we potentially changed - # the system configuration (modules, filesystems), we need to restart - # libvirt to detect those changes. 
- restart_service $LIBVIRT_DAEMON - fi - # Instance Storage # ---------------- @@ -368,6 +292,14 @@ EOF" fi fi fi + + # Rebuild the config file from scratch + create_nova_conf + + if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + # Configure hypervisor plugin + configure_nova_hypervisor + fi } # create_nova_accounts() - Set up common required nova accounts @@ -447,14 +379,6 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT ec2_workers "4" iniset $NOVA_CONF DEFAULT metadata_workers "4" iniset $NOVA_CONF DEFAULT sql_connection `database_connection_url nova` - if is_baremetal; then - iniset $NOVA_CONF baremetal sql_connection `database_connection_url nova_bm` - fi - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" - iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" - iniset $NOVA_CONF DEFAULT use_usb_tablet "False" - fi iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF osapi_v3 enabled "True" @@ -646,37 +570,8 @@ function install_novaclient() { # install_nova() - Collect source and prepare function install_nova() { - if is_service_enabled n-cpu; then - if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then - install_nova_hypervisor - elif [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - if is_ubuntu; then - install_package kvm - install_package libvirt-bin - install_package python-libvirt - elif is_fedora || is_suse; then - install_package kvm - install_package libvirt - install_package libvirt-python - else - exit_distro_not_supported "libvirt installation" - fi - - # Install and configure **LXC** if specified. LXC is another approach to - # splitting a system into many smaller parts. LXC uses cgroups and chroot - # to simulate multiple systems. 
- if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then - if is_ubuntu; then - if [[ "$DISTRO" > natty ]]; then - install_package cgroup-lite - fi - else - ### FIXME(dtroyer): figure this out - echo "RPM-based cgroup not implemented yet" - yum_install libcgroup-tools - fi - fi - fi + if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + install_nova_hypervisor fi git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH diff --git a/lib/nova_plugins/hypervisor-baremetal b/lib/nova_plugins/hypervisor-baremetal new file mode 100644 index 0000000000..4e7c1734d1 --- /dev/null +++ b/lib/nova_plugins/hypervisor-baremetal @@ -0,0 +1,93 @@ +# lib/nova_plugins/hypervisor-baremetal +# Configure the baremetal hypervisor + +# Enable with: +# VIRT_DRIVER=baremetal + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager} +PUBLIC_INTERFACE_DEFAULT=eth0 +FLAT_INTERFACE=${FLAT_INTERFACE:-eth0} +FLAT_NETWORK_BRIDGE_DEFAULT=br100 +STUB_NETWORK=${STUB_NETWORK:-False} + + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor() { + # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor() { + configure_baremetal_nova_dirs + + iniset $NOVA_CONF baremetal sql_connection `database_connection_url nova_bm` + LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"} + iniset $NOVA_CONF DEFAULT compute_driver 
nova.virt.baremetal.driver.BareMetalDriver + iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER + iniset $NOVA_CONF DEFAULT scheduler_host_manager nova.scheduler.baremetal_host_manager.BaremetalHostManager + iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0 + iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0 + iniset $NOVA_CONF baremetal instance_type_extra_specs cpu_arch:$BM_CPU_ARCH + iniset $NOVA_CONF baremetal driver $BM_DRIVER + iniset $NOVA_CONF baremetal power_manager $BM_POWER_MANAGER + iniset $NOVA_CONF baremetal tftp_root /tftpboot + if [[ "$BM_DNSMASQ_FROM_NOVA_NETWORK" = "True" ]]; then + BM_DNSMASQ_CONF=$NOVA_CONF_DIR/dnsmasq-for-baremetal-from-nova-network.conf + sudo cp "$FILES/dnsmasq-for-baremetal-from-nova-network.conf" "$BM_DNSMASQ_CONF" + iniset $NOVA_CONF DEFAULT dnsmasq_config_file "$BM_DNSMASQ_CONF" + fi + + # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``. + for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do + # Attempt to convert flags to options + iniset $NOVA_CONF baremetal ${I/=/ } + done +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor() { + # This function intentionally left blank + : +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor() { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor() { + # This function intentionally left blank + : +} + + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt new file mode 100644 index 0000000000..caf0296ad2 --- /dev/null +++ b/lib/nova_plugins/hypervisor-libvirt @@ -0,0 +1,165 @@ +# lib/nova_plugins/hypervisor-libvirt +# Configure the libvirt hypervisor + +# Enable with: +# VIRT_DRIVER=libvirt + +# Dependencies: +# ``functions`` file +# ``nova`` 
configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor() { + # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor() { + if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then + # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces + cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla +[libvirt Management Access] +Identity=unix-group:$LIBVIRT_GROUP +Action=org.libvirt.unix.manage +ResultAny=yes +ResultInactive=yes +ResultActive=yes +EOF" + elif is_suse && [[ $os_RELEASE = 12.2 || "$os_VENDOR" = "SUSE LINUX" ]]; then + # openSUSE < 12.3 or SLE + # Work around the fact that polkit-default-privs overrules pklas + # with 'unix-group:$group'. 
+ sudo bash -c "cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla +[libvirt Management Access] +Identity=unix-user:$USER +Action=org.libvirt.unix.manage +ResultAny=yes +ResultInactive=yes +ResultActive=yes +EOF" + else + # Starting with fedora 18 and opensuse-12.3 enable stack-user to + # virsh -c qemu:///system by creating a policy-kit rule for + # stack-user using the new Javascript syntax + rules_dir=/etc/polkit-1/rules.d + sudo mkdir -p $rules_dir + sudo bash -c "cat < $rules_dir/50-libvirt-$STACK_USER.rules +polkit.addRule(function(action, subject) { + if (action.id == 'org.libvirt.unix.manage' && + subject.user == '"$STACK_USER"') { + return polkit.Result.YES; + } +}); +EOF" + unset rules_dir + fi + fi + + # The user that nova runs as needs to be member of **libvirtd** group otherwise + # nova-compute will be unable to use libvirt. + if ! getent group $LIBVIRT_GROUP >/dev/null; then + sudo groupadd $LIBVIRT_GROUP + fi + add_user_to_group $STACK_USER $LIBVIRT_GROUP + + # libvirt detects various settings on startup, as we potentially changed + # the system configuration (modules, filesystems), we need to restart + # libvirt to detect those changes. + restart_service $LIBVIRT_DAEMON + + iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" + iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" + iniset $NOVA_CONF DEFAULT use_usb_tablet "False" + iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver" + LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} + iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" + # Power architecture currently does not support graphical consoles. 
+ if is_arch "ppc64"; then + iniset $NOVA_CONF DEFAULT vnc_enabled "false" + fi +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor() { + if is_ubuntu; then + install_package kvm + install_package libvirt-bin + install_package python-libvirt + elif is_fedora || is_suse; then + install_package kvm + install_package libvirt + install_package libvirt-python + fi + + # Install and configure **LXC** if specified. LXC is another approach to + # splitting a system into many smaller parts. LXC uses cgroups and chroot + # to simulate multiple systems. + if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then + if is_ubuntu; then + if [[ "$DISTRO" > natty ]]; then + install_package cgroup-lite + fi + else + ### FIXME(dtroyer): figure this out + echo "RPM-based cgroup not implemented yet" + yum_install libcgroup-tools + fi + fi +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor() { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor() { + # This function intentionally left blank + : +} + + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/nova_plugins/hypervisor-openvz b/lib/nova_plugins/hypervisor-openvz new file mode 100644 index 0000000000..fc5ed0cd11 --- /dev/null +++ b/lib/nova_plugins/hypervisor-openvz @@ -0,0 +1,67 @@ +# lib/nova_plugins/hypervisor-openvz +# Configure the openvz hypervisor + +# Enable with: +# VIRT_DRIVER=openvz + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +MY_XTRACE=$(set +o | 
grep xtrace) +set +o xtrace + + +# Defaults +# -------- + + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor() { + # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor() { + iniset $NOVA_CONF DEFAULT compute_driver "openvz.OpenVzDriver" + iniset $NOVA_CONF DEFAULT connection_type "openvz" + LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} + iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor() { + # This function intentionally left blank + : +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor() { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor() { + # This function intentionally left blank + : +} + + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/nova_plugins/hypervisor-powervm b/lib/nova_plugins/hypervisor-powervm new file mode 100644 index 0000000000..561dd9f00b --- /dev/null +++ b/lib/nova_plugins/hypervisor-powervm @@ -0,0 +1,76 @@ +# lib/nova_plugins/hypervisor-powervm +# Configure the PowerVM hypervisor + +# Enable with: +# VIRT_DRIVER=powervm + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + + 
+# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor() { + # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor() { + POWERVM_MGR_TYPE=${POWERVM_MGR_TYPE:-"ivm"} + POWERVM_MGR_HOST=${POWERVM_MGR_HOST:-"powervm.host"} + POWERVM_MGR_USER=${POWERVM_MGR_USER:-"padmin"} + POWERVM_MGR_PASSWD=${POWERVM_MGR_PASSWD:-"password"} + POWERVM_IMG_REMOTE_PATH=${POWERVM_IMG_REMOTE_PATH:-"/tmp"} + POWERVM_IMG_LOCAL_PATH=${POWERVM_IMG_LOCAL_PATH:-"/tmp"} + iniset $NOVA_CONF DEFAULT compute_driver nova.virt.powervm.PowerVMDriver + iniset $NOVA_CONF DEFAULT powervm_mgr_type $POWERVM_MGR_TYPE + iniset $NOVA_CONF DEFAULT powervm_mgr $POWERVM_MGR_HOST + iniset $NOVA_CONF DEFAULT powervm_mgr_user $POWERVM_MGR_USER + iniset $NOVA_CONF DEFAULT powervm_mgr_passwd $POWERVM_MGR_PASSWD + iniset $NOVA_CONF DEFAULT powervm_img_remote_path $POWERVM_IMG_REMOTE_PATH + iniset $NOVA_CONF DEFAULT powervm_img_local_path $POWERVM_IMG_LOCAL_PATH +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor() { + # This function intentionally left blank + : +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor() { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor() { + # This function intentionally left blank + : +} + + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index 7cd7e30d70..112fbc081e 100755 --- a/stack.sh +++ b/stack.sh @@ -291,13 +291,6 @@ source $TOP_DIR/lib/ldap source $TOP_DIR/lib/ironic source $TOP_DIR/lib/trove -# Look for Nova hypervisor plugin -NOVA_PLUGINS=$TOP_DIR/lib/nova_plugins -if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then - # Load plugin - 
source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER -fi - # Set the destination directories for other OpenStack projects OPENSTACKCLIENT_DIR=$DEST/python-openstackclient @@ -882,6 +875,7 @@ if is_service_enabled g-reg; then init_glance fi + # Ironic # ------ @@ -891,7 +885,6 @@ if is_service_enabled ir-api ir-cond; then fi - # Neutron # ------- @@ -917,11 +910,6 @@ fi # Nova # ---- -if is_service_enabled nova; then - echo_summary "Configuring Nova" - configure_nova -fi - if is_service_enabled n-net q-dhcp; then # Delete traces of nova networks from prior runs # Do not kill any dnsmasq instance spawned by NetworkManager @@ -964,8 +952,6 @@ fi if is_service_enabled nova; then echo_summary "Configuring Nova" - # Rebuild the config file from scratch - create_nova_conf init_nova # Additional Nova configuration that is dependent on other services @@ -975,85 +961,6 @@ if is_service_enabled nova; then create_nova_conf_nova_network fi - - if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then - # Configure hypervisor plugin - configure_nova_hypervisor - - - # OpenVZ - # ------ - - elif [ "$VIRT_DRIVER" = 'openvz' ]; then - echo_summary "Using OpenVZ virtualization driver" - iniset $NOVA_CONF DEFAULT compute_driver "openvz.OpenVzDriver" - iniset $NOVA_CONF DEFAULT connection_type "openvz" - LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} - iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" - - - # Bare Metal - # ---------- - - elif [ "$VIRT_DRIVER" = 'baremetal' ]; then - echo_summary "Using BareMetal driver" - LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"} - iniset $NOVA_CONF DEFAULT compute_driver nova.virt.baremetal.driver.BareMetalDriver - iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER - iniset $NOVA_CONF DEFAULT scheduler_host_manager nova.scheduler.baremetal_host_manager.BaremetalHostManager - iniset $NOVA_CONF DEFAULT 
ram_allocation_ratio 1.0 - iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0 - iniset $NOVA_CONF baremetal instance_type_extra_specs cpu_arch:$BM_CPU_ARCH - iniset $NOVA_CONF baremetal driver $BM_DRIVER - iniset $NOVA_CONF baremetal power_manager $BM_POWER_MANAGER - iniset $NOVA_CONF baremetal tftp_root /tftpboot - if [[ "$BM_DNSMASQ_FROM_NOVA_NETWORK" = "True" ]]; then - BM_DNSMASQ_CONF=$NOVA_CONF_DIR/dnsmasq-for-baremetal-from-nova-network.conf - sudo cp "$FILES/dnsmasq-for-baremetal-from-nova-network.conf" "$BM_DNSMASQ_CONF" - iniset $NOVA_CONF DEFAULT dnsmasq_config_file "$BM_DNSMASQ_CONF" - fi - - # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``. - for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do - # Attempt to convert flags to options - iniset $NOVA_CONF baremetal ${I/=/ } - done - - - # PowerVM - # ------- - - elif [ "$VIRT_DRIVER" = 'powervm' ]; then - echo_summary "Using PowerVM driver" - POWERVM_MGR_TYPE=${POWERVM_MGR_TYPE:-"ivm"} - POWERVM_MGR_HOST=${POWERVM_MGR_HOST:-"powervm.host"} - POWERVM_MGR_USER=${POWERVM_MGR_USER:-"padmin"} - POWERVM_MGR_PASSWD=${POWERVM_MGR_PASSWD:-"password"} - POWERVM_IMG_REMOTE_PATH=${POWERVM_IMG_REMOTE_PATH:-"/tmp"} - POWERVM_IMG_LOCAL_PATH=${POWERVM_IMG_LOCAL_PATH:-"/tmp"} - iniset $NOVA_CONF DEFAULT compute_driver nova.virt.powervm.PowerVMDriver - iniset $NOVA_CONF DEFAULT powervm_mgr_type $POWERVM_MGR_TYPE - iniset $NOVA_CONF DEFAULT powervm_mgr $POWERVM_MGR_HOST - iniset $NOVA_CONF DEFAULT powervm_mgr_user $POWERVM_MGR_USER - iniset $NOVA_CONF DEFAULT powervm_mgr_passwd $POWERVM_MGR_PASSWD - iniset $NOVA_CONF DEFAULT powervm_img_remote_path $POWERVM_IMG_REMOTE_PATH - iniset $NOVA_CONF DEFAULT powervm_img_local_path $POWERVM_IMG_LOCAL_PATH - - - # Default libvirt - # --------------- - - else - echo_summary "Using libvirt virtualization driver" - iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver" - 
LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} - iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" - # Power architecture currently does not support graphical consoles. - if is_arch "ppc64"; then - iniset $NOVA_CONF DEFAULT vnc_enabled "false" - fi - fi - init_nova_cells fi From 893e66360caf3bcf0578d4541b3c17d089c33b02 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 13 Sep 2013 15:05:51 -0500 Subject: [PATCH 0162/4438] Add meta-config via local.conf This defines a new local.conf file that is designed to take the place of all of the 'pass-through'[1] configuration options that have been defined in DevStack. * new local.conf file can contain multiple config file settings to be merged in to existing project config files * localrc can be embedded into local.conf and will auto-extract if localrc does not exist * Adds functions get_meta_section(), get_meta_section_files(), merge_config_file() and merge_config_group() * Adds EXTRA_OPTS, EXTRA_BAREMETAL_OPTS, Q_DHCP_EXTRA_DEFAULT_OPTS and Q_SRV_EXTRA_DEFAULT_OPTS to the deprecated warning list at the end of stack.sh [1] Pass-through options are those that do not configure or change DevStack's behaviour but simply set a value in a project config file. This includes most of the EXTRA_XXX_OPTS configuration variables. 
Change-Id: I367cadc86116621e9574ac203aafdab483d810d3 --- README.md | 39 ++++++++++ functions | 16 ++++ lib/config | 130 +++++++++++++++++++++++++++++++ stack.sh | 102 ++++++++++++++++++++++++ stackrc | 6 +- tests/test_config.sh | 179 +++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 471 insertions(+), 1 deletion(-) create mode 100644 lib/config create mode 100755 tests/test_config.sh diff --git a/README.md b/README.md index 99e983887e..329b94d96f 100644 --- a/README.md +++ b/README.md @@ -244,3 +244,42 @@ To setup a cells environment add the following to your `localrc`: enable_service n-cell Be aware that there are some features currently missing in cells, one notable one being security groups. The exercises have been patched to disable functionality not supported by cells. + + +# Local Configuration + +Historically DevStack has used ``localrc`` to contain all local configuration and customizations. More and more of the configuration variables available for DevStack are passed-through to the individual project configuration files. The old mechanism for this required specific code for each file and did not scale well. This is handled now by a master local configuration file. + +# local.conf + +The new config file ``local.conf`` is an extended-INI format that introduces a new meta-section header that provides some additional information such as a phase name and destination config filename: + + [[ | ]] + +where is one of a set of phase names defined by ``stack.sh`` and is the project config filename. The filename is eval'ed in the stack.sh context so all environment variables are available and may be used. Using the project config file variables in the header is strongly suggested (see example of NOVA_CONF below). If the path of the config file does not exist it is skipped. 
+ +The defined phases are: + +* local - extracts ``localrc`` from ``local.conf`` before ``stackrc`` is sourced +* post-config - runs after the layer 2 services are configured and before they are started +* extra - runs after services are started and before any files in ``extra.d`` are executes + +The file is processed strictly in sequence; meta-sections may be specified more than once but if any settings are duplicated the last to appear in the file will be used. + + [[post-config|$NOVA_CONF]] + [DEFAULT] + use_syslog = True + + [osapi_v3] + enabled = False + +A specific meta-section ``local:localrc`` is used to provide a default localrc file. This allows all custom settings for DevStack to be contained in a single file. ``localrc`` is not overwritten if it exists to preserve compatability. + + [[local|localrc]] + FIXED_RANGE=10.254.1.0/24 + ADMIN_PASSWORD=speciale + LOGFILE=$DEST/logs/stack.sh.log + +Note that ``Q_PLUGIN_CONF_FILE`` is unique in that it is assumed to _NOT_ start with a ``/`` (slash) character. A slash will need to be added: + + [[post-config|/$Q_PLUGIN_CONF_FILE]] diff --git a/functions b/functions index f996ba89ab..87586eb17c 100644 --- a/functions +++ b/functions @@ -155,6 +155,22 @@ function err_if_not_set() { } +# Prints line number and "message" in warning format +# warn $LINENO "message" +function warn() { + local exitcode=$? + errXTRACE=$(set +o | grep xtrace) + set +o xtrace + local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2" + echo $msg 1>&2; + if [[ -n ${SCREEN_LOGDIR} ]]; then + echo $msg >> "${SCREEN_LOGDIR}/error.log" + fi + $errXTRACE + return $exitcode +} + + # HTTP and HTTPS proxy servers are supported via the usual environment variables [1] # ``http_proxy``, ``https_proxy`` and ``no_proxy``. 
They can be set in # ``localrc`` or on the command line if necessary:: diff --git a/lib/config b/lib/config new file mode 100644 index 0000000000..6f686e9b5d --- /dev/null +++ b/lib/config @@ -0,0 +1,130 @@ +# lib/config - Configuration file manipulation functions + +# These functions have no external dependencies and the following side-effects: +# +# CONFIG_AWK_CMD is defined, default is ``awk`` + +# Meta-config files contain multiple INI-style configuration files +# using a specific new section header to delimit them: +# +# [[group-name|file-name]] +# +# group-name refers to the group of configuration file changes to be processed +# at a particular time. These are called phases in ``stack.sh`` but +# group here as these functions are not DevStack-specific. +# +# file-name is the destination of the config file + +# Save trace setting +C_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Allow the awk command to be overridden on legacy platforms +CONFIG_AWK_CMD=${CONFIG_AWK_CMD:-awk} + +# Get the section for the specific group and config file +# get_meta_section infile group configfile +function get_meta_section() { + local file=$1 + local matchgroup=$2 + local configfile=$3 + + [[ -r $file ]] || return 0 + [[ -z $configfile ]] && return 0 + + $CONFIG_AWK_CMD -v matchgroup=$matchgroup -v configfile=$configfile ' + BEGIN { group = "" } + /^\[\[.+|.*\]\]/ { + if (group == "") { + gsub("[][]", "", $1); + split($1, a, "|"); + if (a[1] == matchgroup && a[2] == configfile) { + group=a[1] + } + } else { + group="" + } + next + } + { + if (group != "") + print $0 + } + ' $file +} + + +# Get a list of config files for a specific group +# get_meta_section_files infile group +function get_meta_section_files() { + local file=$1 + local matchgroup=$2 + + [[ -r $file ]] || return 0 + + $CONFIG_AWK_CMD -v matchgroup=$matchgroup ' + /^\[\[.+\|.*\]\]/ { + gsub("[][]", "", $1); + split($1, a, "|"); + if (a[1] == matchgroup) + print a[2] + } + ' $file +} + + +# Merge the contents 
of a meta-config file into its destination config file +# If configfile does not exist it will be created. +# merge_config_file infile group configfile +function merge_config_file() { + local file=$1 + local matchgroup=$2 + local configfile=$3 + + [[ -r $configfile ]] || touch $configfile + + get_meta_section $file $matchgroup $configfile | \ + $CONFIG_AWK_CMD -v configfile=$configfile ' + BEGIN { section = "" } + /^\[.+\]/ { + gsub("[][]", "", $1); + section=$1 + next + } + /^ *\#/ { + next + } + /^.+/ { + split($0, d, " *= *") + print "iniset " configfile " " section " " d[1] " \"" d[2] "\"" + } + ' | while read a; do eval "$a"; done + +} + + +# Merge all of the files specified by group +# merge_config_group infile group [group ...] +function merge_config_group() { + local localfile=$1; shift + local matchgroups=$@ + + [[ -r $localfile ]] || return 0 + + for group in $matchgroups; do + for configfile in $(get_meta_section_files $localfile $group); do + if [[ -d $(dirname $configfile) ]]; then + merge_config_file $localfile $group $configfile + fi + done + done +} + + +# Restore xtrace +$C_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index be04bedade..f6ab4c49f6 100755 --- a/stack.sh +++ b/stack.sh @@ -29,6 +29,9 @@ TOP_DIR=$(cd $(dirname "$0") && pwd) # Import common functions source $TOP_DIR/functions +# Import config functions +source $TOP_DIR/lib/config + # Determine what system we are running on. 
This provides ``os_VENDOR``, # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` # and ``DISTRO`` @@ -38,6 +41,25 @@ GetDistro # Global Settings # =============== +# Check for a ``localrc`` section embedded in ``local.conf`` and extract if +# ``localrc`` does not already exist + +# Phase: local +rm -f $TOP_DIR/.localrc.auto +if [[ -r $TOP_DIR/local.conf ]]; then + LRC=$(get_meta_section_files $TOP_DIR/local.conf local) + for lfile in $LRC; do + if [[ "$lfile" == "localrc" ]]; then + if [[ -r $TOP_DIR/localrc ]]; then + warn $LINENO "localrc and local.conf:[[local]] both exist, using localrc" + else + echo "# Generated file, do not exit" >$TOP_DIR/.localrc.auto + get_meta_section $TOP_DIR/local.conf local $lfile >>$TOP_DIR/.localrc.auto + fi + fi + done +fi + # ``stack.sh`` is customizable by setting environment variables. Override a # default setting via export:: # @@ -842,6 +864,9 @@ if is_service_enabled sysstat;then fi +# Start Services +# ============== + # Keystone # -------- @@ -1153,6 +1178,14 @@ if is_service_enabled nova && is_baremetal; then fi +# Local Configuration +# =================== + +# Apply configuration from local.conf if it exists for layer 2 services +# Phase: post-config +merge_config_group $TOP_DIR/local.conf post-config + + # Launch Services # =============== @@ -1348,6 +1381,14 @@ for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \ done +# Local Configuration +# =================== + +# Apply configuration from local.conf if it exists for layer 2 services +# Phase: extra +merge_config_group $TOP_DIR/local.conf extra + + # Run extras # ========== @@ -1420,5 +1461,66 @@ if [[ -n "$DEPRECATED_TEXT" ]]; then echo_summary "WARNING: $DEPRECATED_TEXT" fi +# Specific warning for deprecated configs +if [[ -n "$EXTRA_OPTS" ]]; then + echo "" + echo_summary "WARNING: EXTRA_OPTS is used" + echo "You are using EXTRA_OPTS to pass configuration into nova.conf." 
+ echo "Please convert that configuration in localrc to a nova.conf section in local.conf:" + echo " +[[post-config|\$NOVA_CONF]] +[DEFAULT] +" + for I in "${EXTRA_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + echo ${I} + done +fi + +if [[ -n "$EXTRA_BAREMETAL_OPTS" ]]; then + echo "" + echo_summary "WARNING: EXTRA_OPTS is used" + echo "You are using EXTRA_OPTS to pass configuration into nova.conf." + echo "Please convert that configuration in localrc to a nova.conf section in local.conf:" + echo " +[[post-config|\$NOVA_CONF]] +[baremetal] +" + for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + echo ${I} + done +fi + +if [[ -n "$Q_DHCP_EXTRA_DEFAULT_OPTS" ]]; then + echo "" + echo_summary "WARNING: Q_DHCP_EXTRA_DEFAULT_OPTS is used" + echo "You are using Q_DHCP_EXTRA_DEFAULT_OPTS to pass configuration into $Q_DHCP_CONF_FILE." + echo "Please convert that configuration in localrc to a $Q_DHCP_CONF_FILE section in local.conf:" + echo " +[[post-config|\$Q_DHCP_CONF_FILE]] +[DEFAULT] +" + for I in "${Q_DHCP_EXTRA_DEFAULT_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + echo ${I} + done +fi + +if [[ -n "$Q_SRV_EXTRA_DEFAULT_OPTS" ]]; then + echo "" + echo_summary "WARNING: Q_SRV_EXTRA_DEFAULT_OPTS is used" + echo "You are using Q_SRV_EXTRA_DEFAULT_OPTS to pass configuration into $NEUTRON_CONF." + echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:" + echo " +[[post-config|\$NEUTRON_CONF]] +[DEFAULT] +" + for I in "${Q_SRV_EXTRA_DEFAULT_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + echo ${I} + done +fi + # Indicate how long this took to run (bash maintained variable ``SECONDS``) echo_summary "stack.sh completed in $SECONDS seconds." 
diff --git a/stackrc b/stackrc index 3a338d16f2..e4a96160d1 100644 --- a/stackrc +++ b/stackrc @@ -48,8 +48,12 @@ IDENTITY_API_VERSION=2.0 USE_SCREEN=True # allow local overrides of env variables, including repo config -if [ -f $RC_DIR/localrc ]; then +if [[ -f $RC_DIR/localrc ]]; then + # Old-style user-supplied config source $RC_DIR/localrc +elif [[ -f $RC_DIR/.localrc.auto ]]; then + # New-style user-supplied config extracted from local.conf + source $RC_DIR/.localrc.auto fi diff --git a/tests/test_config.sh b/tests/test_config.sh new file mode 100755 index 0000000000..fed2e7d477 --- /dev/null +++ b/tests/test_config.sh @@ -0,0 +1,179 @@ +#!/usr/bin/env bash + +# Tests for DevStack meta-config functions + +TOP=$(cd $(dirname "$0")/.. && pwd) + +# Import common functions +source $TOP/functions + +# Import config functions +source $TOP/lib/config + +# check_result() tests and reports the result values +# check_result "actual" "expected" +function check_result() { + local actual=$1 + local expected=$2 + if [[ "$actual" == "$expected" ]]; then + echo "OK" + else + echo -e "failed: $actual != $expected\n" + fi +} + +TEST_1C_ADD="[eee] +type=new +multi = foo2" + +function create_test1c() { + cat >test1c.conf <test2a.conf <test.conf < Date: Mon, 14 Oct 2013 00:51:10 -0500 Subject: [PATCH 0163/4438] remove useless step in cleanup_rpc_backend It shall not make dir of /var/run/openstack for the cleanup operation. install_rpc_backend will make the directory, which is covered by another take care of this. Change-Id: I2bf1bfb4a6b409cc04f2d7b94dd58627e0134b71 --- lib/rpc_backend | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index ff87aae2af..c05bd8cb2a 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -86,10 +86,6 @@ function cleanup_rpc_backend { else exit_distro_not_supported "zeromq installation" fi - - # Necessary directory for socket location. 
- sudo mkdir -p /var/run/openstack - sudo chown $STACK_USER /var/run/openstack fi } From b5e11ff87409a6cac67378715379f739daaa2b0b Mon Sep 17 00:00:00 2001 From: DennyZhang Date: Mon, 14 Oct 2013 00:53:37 -0500 Subject: [PATCH 0164/4438] fix typo in functions Change-Id: I0d09d6d4f4405d3dc96f7a9eed62f87e5d3f8bc1 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 01e2dfc8fd..6f09630ebd 100644 --- a/functions +++ b/functions @@ -2,7 +2,7 @@ # # The following variables are assumed to be defined by certain functions: # ``ENABLED_SERVICES`` -# ``EROR_ON_CLONE`` +# ``ERROR_ON_CLONE`` # ``FILES`` # ``GLANCE_HOSTPORT`` # ``OFFLINE`` From fdc9ae8b9e52004a4fa0a4e0cf7df67f81cba955 Mon Sep 17 00:00:00 2001 From: Min Li Date: Wed, 9 Oct 2013 15:45:41 -0400 Subject: [PATCH 0165/4438] Fix bugs for installing docker, bug #1237581. -The change in install_docker.sh corrects a typo ('=' should be '-'). This typo resutls in 'unable to locate the packet' error when executing apt-get. -The second change is in hypervisor-docker fix the error for reporting docker is not installed when docker is actually set up. The original line missed the version part of the package name. Change-Id: Ic48f45158cf84f89080f095d53c355e9f6969bfd --- lib/nova_plugins/hypervisor-docker | 2 +- tools/docker/install_docker.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index 4c8fc279b0..427554b7db 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -72,7 +72,7 @@ function install_nova_hypervisor() { fi # Make sure Docker is installed - if ! is_package_installed lxc-docker; then + if ! is_package_installed lxc-docker-${DOCKER_PACKAGE_VERSION}; then die $LINENO "Docker is not installed. 
Please run tools/docker/install_docker.sh" fi diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh index 289002e8e7..483955bfc2 100755 --- a/tools/docker/install_docker.sh +++ b/tools/docker/install_docker.sh @@ -38,7 +38,7 @@ curl https://get.docker.io/gpg | sudo apt-key add - install_package python-software-properties && \ sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list" apt_get update -install_package --force-yes lxc-docker=${DOCKER_PACKAGE_VERSION} socat +install_package --force-yes lxc-docker-${DOCKER_PACKAGE_VERSION} socat # Start the daemon - restart just in case the package ever auto-starts... restart_service docker From 83dcf2046060b275373993959b118bb2f3f3ff58 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Sun, 29 Sep 2013 21:45:49 +0100 Subject: [PATCH 0166/4438] XenAPI: Get pool default SR rather than "Local storage" Match the nova default of using the Pool default SR as the storage for VMs Change-Id: I3b681ae7062ba0db3d9eab70b410b7d9658e37c9 --- tools/xen/functions | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tools/xen/functions b/tools/xen/functions index a5c4b70bc3..a1d56568a8 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -69,11 +69,17 @@ function install_xapi_plugins_from { } function get_local_sr { - xe sr-list name-label="Local storage" --minimal + xe pool-list params=default-SR minimal=true } function get_local_sr_path { - echo "/var/run/sr-mount/$(get_local_sr)" + pbd_path="/var/run/sr-mount/$(get_local_sr)" + pbd_device_config_path=`xe pbd-list sr-uuid=$(get_local_sr) params=device-config | grep " path: "` + if [ -n "$pbd_device_config_path" ]; then + pbd_uuid=`xe pbd-list sr-uuid=$(get_local_sr) minimal=true` + pbd_path=`xe pbd-param-get uuid=$pbd_uuid param-name=device-config param-key=path || echo ""` + fi + echo $pbd_path } function find_ip_by_name() { From 557744faecc2f6701d1babf5060a771069b22e94 Mon Sep 17 00:00:00 2001 From: 
DennyZhang Date: Mon, 14 Oct 2013 09:50:13 -0500 Subject: [PATCH 0167/4438] refine rabbit cleanup Beside it's good to killall -9 epmd, if killall epmd fails Change-Id: Ide90ef8ac3339bd70d991666ce0d3550a088670b --- lib/rpc_backend | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index ff87aae2af..61908c41f4 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -63,7 +63,7 @@ function cleanup_rpc_backend { if is_service_enabled rabbit; then # Obliterate rabbitmq-server uninstall_package rabbitmq-server - sudo killall epmd + sudo killall epmd || sudo killall -9 epmd if is_ubuntu; then # And the Erlang runtime too sudo aptitude purge -y ~nerlang From 4fb255cf41d367cd8cc16a0e2d090f1c0733aa84 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 14 Oct 2013 14:07:00 -0400 Subject: [PATCH 0168/4438] add bash8 tool (like pep8, but way hackier) unlike our python code, we have no automatic style checking for bash. For the most part, it's not a big deal, but errant whitespace or incorrect indenting is sometimes annoying to have to -1 people's patches for. Instead of constantly picking it up in manual review maybe we can do better. This is an uber hacky script which could be used to do just that. ./tools/bash8.py file1 file2 file3 ... And it will show issues found with the files at hand. Lightly tested in the existing devstack tree, it exposes a few issues that we might want to think about. This should be python 3 compatible, and includes argparse to provide a basic '-h' support to explain how the command should be run. 
Change-Id: I5009fa5852595c2953a548e430e5e1ce06ae94e0 --- tools/bash8.py | 84 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) create mode 100755 tools/bash8.py diff --git a/tools/bash8.py b/tools/bash8.py new file mode 100755 index 0000000000..82a10107e1 --- /dev/null +++ b/tools/bash8.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# bash8 - a pep8 equivalent for bash scripts +# +# this program attempts to be an automated style checker for bash scripts +# to fill the same part of code review that pep8 does in most OpenStack +# projects. It starts from humble beginnings, and will evolve over time. 
+# +# Currently Supported checks +# +# Errors +# - E001: check that lines do not end with trailing whitespace +# - E002: ensure that indents are only spaces, and not hard tabs +# - E003: ensure all indents are a multiple of 4 spaces + +import argparse +import fileinput +import re +import sys + + +ERRORS = 0 + + +def print_error(error, line): + global ERRORS + ERRORS = ERRORS + 1 + print("%s: '%s'" % (error, line.rstrip('\n'))) + print(" - %s: L%s" % (fileinput.filename(), fileinput.filelineno())) + + +def check_no_trailing_whitespace(line): + if re.search('[ \t]+$', line): + print_error('E001: Trailing Whitespace', line) + + +def check_indents(line): + m = re.search('^(?P[ \t]+)', line) + if m: + if re.search('\t', m.group('indent')): + print_error('E002: Tab indents', line) + if (len(m.group('indent')) % 4) != 0: + print_error('E003: Indent not multiple of 4', line) + + +def check_files(files): + for line in fileinput.input(files): + check_no_trailing_whitespace(line) + check_indents(line) + + +def get_options(): + parser = argparse.ArgumentParser( + description='A bash script style checker') + parser.add_argument('files', metavar='file', nargs='+', + help='files to scan for errors') + return parser.parse_args() + + +def main(): + opts = get_options() + check_files(opts.files) + + if ERRORS > 0: + print("%d bash8 error(s) found" % ERRORS) + return 1 + else: + return 0 + + +if __name__ == "__main__": + sys.exit(main()) From e0f4065afdd591d2511a4d8689dacab98392b331 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 14 Oct 2013 17:46:51 -0400 Subject: [PATCH 0169/4438] add a simple run_tests.sh to use bash8 this gives a simple way to run against all the files that we'd want to check with bash8. Currently clocking in at 300+ errors (no false pos so far that I see). 
Change-Id: Idd83b0bf61029b49bb28ad8b6e6261ecbf927555 --- run_tests.sh | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100755 run_tests.sh diff --git a/run_tests.sh b/run_tests.sh new file mode 100755 index 0000000000..9d9d18661e --- /dev/null +++ b/run_tests.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# +# this runs a series of unit tests for devstack to ensure it's functioning + +if [[ -n $@ ]]; then + FILES=$@ +else + LIBS=`find lib -type f | grep -v \.md` + SCRIPTS=`find . -type f -name \*\.sh` + EXTRA="functions" + FILES="$SCRIPTS $LIBS $EXTRA" +fi + +echo "Running bash8..." 
+ +./tools/bash8.py $FILES From 48e1bab5423b8bfa5c5f48736fa0af99e6f0f8fc Mon Sep 17 00:00:00 2001 From: DennyZhang Date: Fri, 11 Oct 2013 22:06:25 -0500 Subject: [PATCH 0170/4438] Fix false negative, when HEAT_CREATE_TEST_IMAGE is unset Fix shell variable comparision bug Closes-Bug: #1239041 Change-Id: Ifbc8545f929eb7bbf9b85df889dfd9fa3a96b7c0 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index bc0b18d9f4..9f41608187 100644 --- a/lib/tempest +++ b/lib/tempest @@ -266,7 +266,7 @@ function configure_tempest() { iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # Orchestration test image - if [ $HEAT_CREATE_TEST_IMAGE == "True" ]; then + if [[ "$HEAT_CREATE_TEST_IMAGE" = "True" ]]; then disk_image_create /usr/share/tripleo-image-elements "vm fedora heat-cfntools" "i386" "fedora-vm-heat-cfntools-tempest" iniset $TEMPEST_CONF orchestration image_ref "fedora-vm-heat-cfntools-tempest" fi From c48c3124c87de2c233c2596e1a759106b598b22b Mon Sep 17 00:00:00 2001 From: Roman Prykhodchenko Date: Tue, 1 Oct 2013 17:19:05 +0300 Subject: [PATCH 0171/4438] Enable keystone authentication in Ironic Currently Ironic installation script leaves authenticaiton strategy in its default value which is noauth. This is not relevant for the most of development and testing environments. This patch sets authentication strategy for Ironic to keystone and specifies the path to the policy file. Closes-bug: #1233612 Change-Id: Idacbda05663e7ef949cbce0dbdf28eaa36b6a1a9 --- lib/ironic | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ironic b/lib/ironic index f3b4a72f66..8c1f52b330 100644 --- a/lib/ironic +++ b/lib/ironic @@ -79,6 +79,8 @@ function configure_ironic() { # configure_ironic_api() - Is used by configure_ironic(). Performs # API specific configuration. 
function configure_ironic_api() { + iniset $IRONIC_CONF_FILE DEFAULT auth_strategy keystone + iniset $IRONIC_CONF_FILE DEFAULT policy_file $IRONIC_POLICY_JSON iniset $IRONIC_CONF_FILE keystone_authtoken auth_host $KEYSTONE_AUTH_HOST iniset $IRONIC_CONF_FILE keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $IRONIC_CONF_FILE keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL From 43e00660c30d5f7b78d9eacfe2540a0c92fe5bb9 Mon Sep 17 00:00:00 2001 From: Roman Prykhodchenko Date: Tue, 15 Oct 2013 17:03:15 +0300 Subject: [PATCH 0172/4438] Install Ironic client Since python-ironicclient was published to github it's reasonable to include it to the default Ironic set up. Change-Id: Id1d0209959a3b482977b5e710c0885c714ad7e10 --- lib/ironic | 20 ++++++++++++++------ stack.sh | 1 + stackrc | 4 ++++ 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/lib/ironic b/lib/ironic index f3b4a72f66..89d0edc1a4 100644 --- a/lib/ironic +++ b/lib/ironic @@ -11,6 +11,7 @@ # ``stack.sh`` calls the entry points in this order: # # install_ironic +# install_ironicclient # configure_ironic # init_ironic # start_ironic @@ -27,6 +28,7 @@ set +o xtrace # Set up default directories IRONIC_DIR=$DEST/ironic +IRONICCLIENT_DIR=$DEST/python-ironicclient IRONIC_AUTH_CACHE_DIR=${IRONIC_AUTH_CACHE_DIR:-/var/cache/ironic} IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic} IRONIC_CONF_FILE=$IRONIC_CONF_DIR/ironic.conf @@ -45,6 +47,18 @@ IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST:6385} # Functions # --------- +# install_ironic() - Collect source and prepare +function install_ironic() { + git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH + setup_develop $IRONIC_DIR +} + +# install_ironicclient() - Collect sources and prepare +function install_ironicclient() { + git_clone $IRONICCLIENT_REPO $IRONICCLIENT_DIR $IRONICCLIENT_BRANCH + setup_develop $IRONICCLIENT_DIR +} + # cleanup_ironic() - Remove residual data files, anything left over from previous # runs that would need to clean up. 
function cleanup_ironic() { @@ -170,12 +184,6 @@ function init_ironic() { create_ironic_accounts } -# install_ironic() - Collect source and prepare -function install_ironic() { - git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH - setup_develop $IRONIC_DIR -} - # start_ironic() - Start running processes, including screen function start_ironic() { # Start Ironic API server, if enabled. diff --git a/stack.sh b/stack.sh index 14ec023a51..2501cd0eb4 100755 --- a/stack.sh +++ b/stack.sh @@ -722,6 +722,7 @@ fi if is_service_enabled ir-api ir-cond; then install_ironic + install_ironicclient configure_ironic fi diff --git a/stackrc b/stackrc index 3f740b5678..0151672c1d 100644 --- a/stackrc +++ b/stackrc @@ -104,6 +104,10 @@ HORIZON_BRANCH=${HORIZON_BRANCH:-master} IRONIC_REPO=${IRONIC_REPO:-${GIT_BASE}/openstack/ironic.git} IRONIC_BRANCH=${IRONIC_BRANCH:-master} +# ironic client +IRONICCLIENT_REPO=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git} +IRONICCLIENT_BRANCH=${IRONICCLIENT_BRANCH:-master} + # unified auth system (manages accounts/tokens) KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git} KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master} From cdf3d766478d04e62a860754298e7d86f89b33a9 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 15 Oct 2013 09:42:43 -0500 Subject: [PATCH 0173/4438] Add stack phases to extras.d handling Add hooks to stack.sh, unstack.sh and clean.sh to call the extras.d scripts at multiple points in stack.sh. This allows these scripts to perform installation and startup tasks at similar times as they would if integrated into stack.sh. extras.d/70-tempest.sh is present as an example of the structure of these scripts. See extras.d/README.md for more information. 
Change-Id: Ic1fe522559b94d204d6c0319a2e3d23684c8d028 --- README.md | 4 ++++ clean.sh | 19 +++++++++++++++++++ extras.d/80-tempest.sh | 32 ++++++++++++++++++++------------ extras.d/README | 14 -------------- extras.d/README.md | 31 +++++++++++++++++++++++++++++++ stack.sh | 34 +++++++++++++++++++++++++++++++++- unstack.sh | 11 +++++++++++ 7 files changed, 118 insertions(+), 27 deletions(-) delete mode 100644 extras.d/README create mode 100644 extras.d/README.md diff --git a/README.md b/README.md index 66e36b22a8..514786c60f 100644 --- a/README.md +++ b/README.md @@ -215,6 +215,10 @@ If tempest has been successfully configured, a basic set of smoke tests can be r $ cd /opt/stack/tempest $ nosetests tempest/scenario/test_network_basic_ops.py +# Additional Projects + +DevStack has a hook mechanism to call out to a dispatch script at specific points in the execution if `stack.sh`, `unstack.sh` and `clean.sh`. This allows higher-level projects, especially those that the lower level projects have no dependency on, to be added to DevStack without modifying the scripts. Tempest is built this way as an example of how to structure the dispatch script, see `extras.d/80-tempest.sh`. See `extras.d/README.md` for more information. + # Multi-Node Setup A more interesting setup involves running multiple compute nodes, with Neutron networks connecting VMs on different compute nodes. diff --git a/clean.sh b/clean.sh index 6ceb5a4933..395941ae21 100755 --- a/clean.sh +++ b/clean.sh @@ -47,6 +47,15 @@ source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap +# Extras Source +# -------------- + +# Phase: source +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i source + done +fi # See if there is anything running... 
# need to adapt when run_service is merged @@ -56,6 +65,16 @@ if [[ -n "$SESSION" ]]; then $TOP_DIR/unstack.sh --all fi +# Run extras +# ========== + +# Phase: clean +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i clean + done +fi + # Clean projects cleanup_oslo cleanup_cinder diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh index f159955726..75b702c700 100644 --- a/extras.d/80-tempest.sh +++ b/extras.d/80-tempest.sh @@ -1,21 +1,29 @@ # tempest.sh - DevStack extras script -source $TOP_DIR/lib/tempest - -if [[ "$1" == "stack" ]]; then - # Configure Tempest last to ensure that the runtime configuration of - # the various OpenStack services can be queried. - if is_service_enabled tempest; then - echo_summary "Configuring Tempest" +if is_service_enabled tempest; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/tempest + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing Tempest" install_tempest + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + # Tempest config must come after layer 2 services are running + : + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + echo_summary "Initializing Tempest" configure_tempest init_tempest fi -fi -if [[ "$1" == "unstack" ]]; then - # no-op - : -fi + if [[ "$1" == "unstack" ]]; then + # no-op + : + fi + if [[ "$1" == "clean" ]]; then + # no-op + : + fi +fi diff --git a/extras.d/README b/extras.d/README deleted file mode 100644 index ffc6793abd..0000000000 --- a/extras.d/README +++ /dev/null @@ -1,14 +0,0 @@ -The extras.d directory contains project initialization scripts to be -sourced by stack.sh at the end of its run. This is expected to be -used by external projects that want to be configured, started and -stopped with DevStack. - -Order is controlled by prefixing the script names with the a two digit -sequence number. Script names must end with '.sh'. 
This provides a -convenient way to disable scripts by simoy renaming them. - -DevStack reserves the sequence numbers 00 through 09 and 90 through 99 -for its own use. - -The scripts are called with an argument of 'stack' by stack.sh and -with an argument of 'unstack' by unstack.sh. diff --git a/extras.d/README.md b/extras.d/README.md new file mode 100644 index 0000000000..591e438b02 --- /dev/null +++ b/extras.d/README.md @@ -0,0 +1,31 @@ +# Extras Hooks + +The `extras.d` directory contains project dispatch scripts that are called +at specific times by `stack.sh`, `unstack.sh` and `clean.sh`. These hooks are +used to install, configure and start additional projects during a DevStack run +without any modifications to the base DevStack scripts. + +When `stack.sh` reaches one of the hook points it sources the scripts in `extras.d` +that end with `.sh`. To control the order that the scripts are sourced their +names start with a two digit sequence number. DevStack reserves the sequence +numbers 00 through 09 and 90 through 99 for its own use. + +The scripts are sourced at each hook point so they should not declare anything +at the top level that would cause a problem, specifically, functions. This does +allow the entire `stack.sh` variable space to be available. The scripts are +sourced with one or more arguments, the first of which defines the hook phase: + +arg 1: source | stack | unstack | clean + + source: always called first in any of the scripts, used to set the + initial defaults in a lib/* script or similar + + stack: called by stack.sh. There are three possible values for + the second arg to distinguish the phase stack.sh is in: + + arg 2: install | post-config | extra + + unstack: called by unstack.sh + + clean: called by clean.sh. Remember, clean.sh also calls unstack.sh + so that work need not be repeated. 
diff --git a/stack.sh b/stack.sh index 14ec023a51..aa0efea487 100755 --- a/stack.sh +++ b/stack.sh @@ -313,6 +313,16 @@ source $TOP_DIR/lib/ldap source $TOP_DIR/lib/ironic source $TOP_DIR/lib/trove +# Extras Source +# -------------- + +# Phase: source +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i source + done +fi + # Set the destination directories for other OpenStack projects OPENSTACKCLIENT_DIR=$DEST/python-openstackclient @@ -725,6 +735,16 @@ if is_service_enabled ir-api ir-cond; then configure_ironic fi +# Extras Install +# -------------- + +# Phase: install +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i stack install + done +fi + if [[ $TRACK_DEPENDS = True ]]; then $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then @@ -1000,6 +1020,17 @@ if is_service_enabled nova && is_baremetal; then fi +# Extras Configuration +# ==================== + +# Phase: post-config +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i stack post-config + done +fi + + # Local Configuration # =================== @@ -1214,9 +1245,10 @@ merge_config_group $TOP_DIR/local.conf extra # Run extras # ========== +# Phase: extra if [[ -d $TOP_DIR/extras.d ]]; then for i in $TOP_DIR/extras.d/*.sh; do - [[ -r $i ]] && source $i stack + [[ -r $i ]] && source $i stack extra done fi diff --git a/unstack.sh b/unstack.sh index c944ccc0fb..67c8b7c7b1 100755 --- a/unstack.sh +++ b/unstack.sh @@ -42,6 +42,16 @@ source $TOP_DIR/lib/neutron source $TOP_DIR/lib/ironic source $TOP_DIR/lib/trove +# Extras Source +# -------------- + +# Phase: source +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i source + done +fi + # Determine what system we are running on. 
This provides ``os_VENDOR``, # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` GetOSVersion @@ -53,6 +63,7 @@ fi # Run extras # ========== +# Phase: unstack if [[ -d $TOP_DIR/extras.d ]]; then for i in $TOP_DIR/extras.d/*.sh; do [[ -r $i ]] && source $i unstack From 75e851a6de99d57eaab3e682b249067cb6065cd0 Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Wed, 16 Oct 2013 08:34:05 +0000 Subject: [PATCH 0174/4438] exercices: aggregates needs to be more flexible The actual regex checks a result in python format and because of the change in the bug 1132961, Jekins failed. I have update the regex to work with the old result and the new result. Change-Id: I393e1358f99be5f20d9ac8b3e214355a453ecfcb Closes-Bug: 1239726 --- exercises/aggregates.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index e2baecdb11..e5fc7dec84 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -100,7 +100,7 @@ META_DATA_2_KEY=foo META_DATA_3_KEY=bar #ensure no additional metadata is set -nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}" +nova aggregate-details $AGGREGATE_ID | egrep "\|[{u ]*'availability_zone.+$AGGREGATE_A_ZONE'[ }]*\|" nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_1_KEY}=123 nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY @@ -117,7 +117,7 @@ nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die $LINENO "ERROR metadata was not cleared" nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY -nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}" +nova aggregate-details $AGGREGATE_ID | egrep "\|[{u ]*'availability_zone.+$AGGREGATE_A_ZONE'[ }]*\|" # Test aggregate-add/remove-host From bd8ac01b02cafba7cfd98364c8f3009c19042da4 Mon Sep 17 00:00:00 2001 From: Mike Perez 
Date: Tue, 20 Aug 2013 21:53:30 -0700 Subject: [PATCH 0175/4438] Default to Cinder REST API v2 Set OS_VOLUME_API_VERSION environment variable to 2 so we use specifically Cinder REST API v2. v1 is still enabled in the catalog, but we want more exposure to v2 for testing. Change-Id: I6c2f29edf44a0f58a7830fe4dd2db35f2db3658c --- openrc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/openrc b/openrc index 3de7e3958f..d5b215603a 100644 --- a/openrc +++ b/openrc @@ -81,3 +81,8 @@ export OS_CACERT=$INT_CA_DIR/ca-chain.pem export NOVA_VERSION=${NOVA_VERSION:-1.1} # In the future this will change names: export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION} + +# Currently cinderclient needs you to specify the *volume api* version. This +# needs to match the config of your catalog returned by Keystone. +export CINDER_VERSION=${CINDER_VERSION:-2} +export OS_VOLUME_API_VERSION=${OS_VOLUME_API_VERSION:-$CINDER_VERSION} From 65f1af6dd3ea97803cbd6f910e5619cca3ac5173 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 16 Oct 2013 12:10:13 -0500 Subject: [PATCH 0176/4438] Fix fixup_stuff.sh package permissions fix There are a number of different attempts to fix this issue, specifcally on RHEL6. None of them actually get it right. * This does not upgrade an OS installed package because we trust them to not make these sorts of permissions mistakes. Also we do not have nor want to figure out the right version that the OpenStack projects will require. * This specfically targets the upstream package versions as we do not know how later versions behave. 
This should address the following reviews: * https://review.openstack.org/#/c/50540/ * https://review.openstack.org/#/c/51233/ (1238707) * https://review.openstack.org/#/c/51651/ (1239747) * https://review.openstack.org/#/c/51843/ * https://review.openstack.org/#/c/51838/ * https://review.openstack.org/#/c/52148/ (1236941) Change-Id: I99906451dc25654628187b383e8893cce0e276bf --- tools/fixup_stuff.sh | 40 +++++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index f3c0f9810d..9e65b7c21e 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -35,25 +35,35 @@ FILES=$TOP_DIR/files # Python Packages # --------------- -# Pre-install affected packages so we can fix the permissions -pip_install prettytable -pip_install httplib2 +# get_package_path python-package # in import notation +function get_package_path() { + local package=$1 + echo $(python -c "import os; import $package; print(os.path.split(os.path.realpath($package.__file__))[0])") +} -SITE_DIRS=$(python -c "import site; import os; print os.linesep.join(site.getsitepackages())") -for dir in $SITE_DIRS; do - # Fix prettytable 0.7.2 permissions - if [[ -r $dir/prettytable.py ]]; then - sudo chmod +r $dir/prettytable-0.7.2*/* - fi +# Pre-install affected packages so we can fix the permissions +# These can go away once we are confident that pip 1.4.1+ is available everywhere - # Fix httplib2 0.8 permissions - httplib_dir=httplib2-0.8.egg-info - if [[ -d $dir/$httplib_dir ]]; then - sudo chmod +r $dir/$httplib_dir/* - fi +# Fix prettytable 0.7.2 permissions +# Don't specify --upgrade so we use the existing package if present +pip_install prettytable +PACKAGE_DIR=$(get_package_path prettytable) +# Only fix version 0.7.2 +dir=$(echo $PACKAGE_DIR/prettytable-0.7.2*) +if [[ -d $dir ]]; then + sudo chmod +r $dir/* +fi -done +# Fix httplib2 0.8 permissions +# Don't specify --upgrade so we use the existing package if present 
+pip_install httplib2 +PACKAGE_DIR=$(get_package_path httplib2) +# Only fix version 0.8 +dir=$(echo $PACKAGE_DIR-0.8*) +if [[ -d $dir ]]; then + sudo chmod +r $dir/* +fi # RHEL6 From cbce1fa418ccb271879040b117b96038fefb479f Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 26 Sep 2013 09:20:04 +0000 Subject: [PATCH 0177/4438] Just reset the repo instead of trying to co files We can just reset the repo instead of trying to checkout specific files. This fix external repos which doesn't not have requirements.txt or test-requirements.txt. Closes-Bug: 1231334 Change-Id: Iab898f5e8a422cc0cbfe44839c938f22c7525fd8 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index d969677fc5..92b4ee11cf 100644 --- a/functions +++ b/functions @@ -1257,7 +1257,7 @@ function setup_develop() { # Undo requirements changes, if we made them if [ $update_requirements -eq 0 ]; then - (cd $project_dir && git checkout -- requirements.txt test-requirements.txt setup.py) + (cd $project_dir && git reset --hard) fi } From 8b5d3cf3df65682f94a1885ef71d2fb31bdfb3ba Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Thu, 17 Oct 2013 21:42:49 +0900 Subject: [PATCH 0178/4438] Do not install pip when OFFLINE=True install_pip.sh trys to fetch pip from the internet even if OFFLINE=True. It causes stack.sh to fail if the environment is actually disconnected from the internet. With this patch, stack.sh skips install_pip.sh if OFFLINE=True. 
Change-Id: Ica9e5cfa0a4ee684c05393896c2fd6ddbd9ccd06 Closes-Bug: 1240956 --- stack.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index aa0efea487..625eb5f956 100755 --- a/stack.sh +++ b/stack.sh @@ -588,7 +588,9 @@ echo_summary "Installing package prerequisites" source $TOP_DIR/tools/install_prereqs.sh # Configure an appropriate python environment -$TOP_DIR/tools/install_pip.sh +if [[ "$OFFLINE" != "True" ]]; then + $TOP_DIR/tools/install_pip.sh +fi # Do the ugly hacks for borken packages and distros $TOP_DIR/tools/fixup_stuff.sh From 741fc5c08496db1518a7698b093aa1f696f67c4f Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 16 Oct 2013 17:48:16 -0400 Subject: [PATCH 0179/4438] Use nova.conf for auth_token configs. Updates lib/nova so that we use the application config file (nova.conf) instead of the Nova api-paste.ini config file. Related-Bug #1240753 Change-Id: I393a67f1f005e775928130c9241aa7e25c391ae3 --- lib/nova | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/lib/nova b/lib/nova index 8deb3a01a9..5ff5099c6d 100644 --- a/lib/nova +++ b/lib/nova @@ -212,26 +212,24 @@ function configure_nova() { configure_nova_rootwrap if is_service_enabled n-api; then - # Use the sample http middleware configuration supplied in the - # Nova sources. This paste config adds the configuration required - # for Nova to validate Keystone tokens. - # Remove legacy paste config if present rm -f $NOVA_DIR/bin/nova-api-paste.ini # Get the sample configuration file in place cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR - iniset $NOVA_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST + # Comment out the keystone configs in Nova's api-paste.ini. + # We are using nova.conf to configure this instead. 
+ inicomment $NOVA_API_PASTE_INI filter:authtoken auth_host if is_service_enabled tls-proxy; then - iniset $NOVA_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + inicomment $NOVA_API_PASTE_INI filter:authtoken auth_protocol fi - iniset $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $NOVA_API_PASTE_INI filter:authtoken admin_user nova - iniset $NOVA_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD + inicomment $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name + inicomment $NOVA_API_PASTE_INI filter:authtoken admin_user + inicomment $NOVA_API_PASTE_INI filter:authtoken admin_password fi - iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR + inicomment $NOVA_API_PASTE_INI filter:authtoken signing_dir if is_service_enabled n-cpu; then # Force IP forwarding on, just on case @@ -394,7 +392,20 @@ function create_nova_conf() { # Set the service port for a proxy to take the original iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT" fi + + # Add keystone authtoken configuration + + iniset $NOVA_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + if is_service_enabled tls-proxy; then + iniset $NOVA_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + fi + iniset $NOVA_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $NOVA_CONF keystone_authtoken admin_user nova + iniset $NOVA_CONF keystone_authtoken admin_password $SERVICE_PASSWORD fi + + iniset $NOVA_CONF keystone_authtoken signing_dir $NOVA_AUTH_CACHE_DIR + if is_service_enabled cinder; then iniset $NOVA_CONF DEFAULT volume_api_class "nova.volume.cinder.API" fi From 82dea7c64a1a7ac81a1a02753e516bb1d67eebd2 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 16 Oct 2013 18:57:15 -0400 Subject: [PATCH 0180/4438] Use cinder.conf for auth_token configs. 
Updates lib/cinder so that we use the application config file (cinder.conf) instead of the Cinder api-paste.ini config file. Related-Bug #1240753 Change-Id: I6636d33ee522757145ac97fc354324a8b9379700 --- lib/cinder | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/lib/cinder b/lib/cinder index 220488a07e..f6f137cabd 100644 --- a/lib/cinder +++ b/lib/cinder @@ -202,15 +202,25 @@ function configure_cinder() { sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI - iniset $CINDER_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $CINDER_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $CINDER_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $CINDER_API_PASTE_INI filter:authtoken admin_user cinder - iniset $CINDER_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD - iniset $CINDER_API_PASTE_INI filter:authtoken signing_dir $CINDER_AUTH_CACHE_DIR + + inicomment $CINDER_API_PASTE_INI filter:authtoken auth_host + inicomment $CINDER_API_PASTE_INI filter:authtoken auth_port + inicomment $CINDER_API_PASTE_INI filter:authtoken auth_protocol + inicomment $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name + inicomment $CINDER_API_PASTE_INI filter:authtoken admin_user + inicomment $CINDER_API_PASTE_INI filter:authtoken admin_password + inicomment $CINDER_API_PASTE_INI filter:authtoken signing_dir cp $CINDER_DIR/etc/cinder/cinder.conf.sample $CINDER_CONF + + iniset $CINDER_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $CINDER_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $CINDER_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $CINDER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $CINDER_CONF keystone_authtoken admin_user cinder + 
iniset $CINDER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $CINDER_CONF keystone_authtoken signing_dir $CINDER_AUTH_CACHE_DIR + iniset $CINDER_CONF DEFAULT auth_strategy keystone iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $CINDER_CONF DEFAULT verbose True From 6d4a9a87b7aebca2de7bfe034dff630d49f52883 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 14 Oct 2013 16:20:32 +0200 Subject: [PATCH 0181/4438] Don't kill ceilometer prematurally in devstackgate This change ensure that 'ceilometer' processes are not killed by stack.sh when USE_SCREEN=False Fixes bug #1234254 Change-Id: I48dbf18ea0b169cdb5295a709d82c025f6fb8930 --- lib/ceilometer | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 1b0431906a..cd4c4d8656 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -134,12 +134,12 @@ function install_ceilometerclient() { # start_ceilometer() - Start running processes, including screen function start_ceilometer() { - screen_it ceilometer-acompute "sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" - screen_it ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF" - screen_it ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF" - screen_it ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" - screen_it ceilometer-alarm-notifier "ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" - screen_it ceilometer-alarm-evaluator "ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF" + screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" + screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF" + screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF" + screen_it ceilometer-api "cd ; ceilometer-api -d -v 
--log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" + screen_it ceilometer-alarm-notifier "cd ; ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" + screen_it ceilometer-alarm-evaluator "cd ; ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF" } # stop_ceilometer() - Stop running processes From a20c620c7d323b8f489cb20ac64c7ab62c8bb213 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 14 Oct 2013 16:16:35 +0200 Subject: [PATCH 0182/4438] Don't kill sar prematurally in devstack-gate This change ensure that 'sar' is not killed by stack.sh when USE_SCREEN=False Fixes bug #1238482 Change-Id: Id354619a43c27eabbc57f61ba33be2a9493244aa --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index aa0efea487..89a03b5a9b 100755 --- a/stack.sh +++ b/stack.sh @@ -840,7 +840,7 @@ init_service_check # If enabled, systat has to start early to track OpenStack service startup. if is_service_enabled sysstat;then if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it sysstat "sar -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" + screen_it sysstat "cd ; sar -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" else screen_it sysstat "sar $SYSSTAT_INTERVAL" fi From c01e6a789e7e79e735ca3a66965db07622ab3bea Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 14 Oct 2013 16:26:02 +0200 Subject: [PATCH 0183/4438] Don't kill nova-bm-deploy-helper prematurally This change ensure that 'nova-baremetal-deploy-helper' process is not killed by stack.sh when USE_SCREEN=False Change-Id: I84f6f3c3d09bf0cd0d4c5d94eb486a1f7d0b1d0f --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 89a03b5a9b..cf3baa2de0 100755 --- a/stack.sh +++ b/stack.sh @@ -1222,7 +1222,7 @@ if is_service_enabled nova && is_baremetal; then fi # ensure callback daemon is running sudo pkill nova-baremetal-deploy-helper || true - screen_it baremetal "nova-baremetal-deploy-helper" + screen_it baremetal "cd ; 
nova-baremetal-deploy-helper" fi # Save some values we generated for later use From 8111ef0aa55d2bec3ca958940171a5c9992eaee9 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Fri, 18 Oct 2013 16:21:26 +0200 Subject: [PATCH 0184/4438] Decrease tempest BUILD_TIMEOUT The BUILD_TIMEOUT is the generally used timeout option in tempest. Almost never expected to any operation takes more than 60 sec, the 400 sec is too match for timeout. Changing the BUILD_TIMEOUT to 196 sec, it is still expected to be safe. It can make faster the failing test jobs. Change-Id: I7e7c767400ca448cb86d27b60a1229a2afa69726 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 9f41608187..e8edeb37ca 100644 --- a/lib/tempest +++ b/lib/tempest @@ -48,7 +48,7 @@ TEMPEST_STATE_PATH=${TEMPEST_STATE_PATH:=$DATA_DIR/tempest} NOVA_SOURCE_DIR=$DEST/nova BUILD_INTERVAL=1 -BUILD_TIMEOUT=400 +BUILD_TIMEOUT=196 BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-0.3.1" From 105c6e8718da2db50e48cb4a68be8522a80e101e Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Fri, 18 Oct 2013 15:33:26 +0100 Subject: [PATCH 0185/4438] Create-stack-user script should have execute permissions Currently running stack.sh as root advises you about this script, which is not executable Change-Id: I674af044b8f3c31bcc86be5c6552e8086453d5cd --- tools/create-stack-user.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 tools/create-stack-user.sh diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh old mode 100644 new mode 100755 From f43f3a59c26979f40510b7531b587b029088c871 Mon Sep 17 00:00:00 2001 From: DennyZhang Date: Fri, 11 Oct 2013 23:09:47 -0500 Subject: [PATCH 0186/4438] ./stack.sh complain no /etc/nova/nova.conf If nova.conf doesn't exist, mute error generated by grep. 
Closes-Bug: #1239044 Change-Id: Ia497e2a9d8395cc11850fb16fd4075af9855b2a5 --- functions | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 01e2dfc8fd..eca0f9be16 100644 --- a/functions +++ b/functions @@ -697,7 +697,8 @@ function iniset() { local section=$2 local option=$3 local value=$4 - if ! grep -q "^\[$section\]" "$file"; then + + if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then # Add section at the end echo -e "\n[$section]" >>"$file" fi From acb52e5db6884e6d2eeef8351306736a70345556 Mon Sep 17 00:00:00 2001 From: DennyZhang Date: Fri, 11 Oct 2013 00:08:29 -0500 Subject: [PATCH 0187/4438] detect failure of 'keystone token-get' When 'keystone token-get' fails, the caller can't detect the failure. This cause troulbe shooting a bit complicated. Change-Id: I3c58c5fd0e92a87e87546ea797904e08646a1097 Closes-Bug: #1238412 --- exercises/neutron-adv-test.sh | 1 + stack.sh | 1 + tools/upload_image.sh | 1 + 3 files changed, 3 insertions(+) diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index abb29cf333..e0c37ef723 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -102,6 +102,7 @@ KEYSTONE="keystone" # and save it. 
TOKEN=`keystone token-get | grep ' id ' | awk '{print $4}'` +die_if_not_set $LINENO TOKEN "Keystone fail to get token" # Various functions # ----------------- diff --git a/stack.sh b/stack.sh index 7cd7e30d70..024c52ef98 100755 --- a/stack.sh +++ b/stack.sh @@ -1203,6 +1203,7 @@ fi if is_service_enabled g-reg; then TOKEN=$(keystone token-get | grep ' id ' | get_field 2) + die_if_not_set $LINENO TOKEN "Keystone fail to get token" if is_baremetal; then echo_summary "Creating and uploading baremetal images" diff --git a/tools/upload_image.sh b/tools/upload_image.sh index dd21c9f2a8..d81a5c8dab 100755 --- a/tools/upload_image.sh +++ b/tools/upload_image.sh @@ -33,6 +33,7 @@ fi # Get a token to authenticate to glance TOKEN=$(keystone token-get | grep ' id ' | get_field 2) +die_if_not_set $LINENO TOKEN "Keystone fail to get token" # Glance connection info. Note the port must be specified. GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_HOST:9292} From b8dd27bf457d1c7a7ad0f1b3a946529c8a1d073f Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 17 Oct 2013 12:03:55 -0500 Subject: [PATCH 0188/4438] Fix typos and thinkos in docs Updates for the new major features and some clarification Partial-Bug: #1235626 Change-Id: If2da63e62a14894e498b4163b5052d9b2b2069ed --- HACKING.rst | 28 ++++---- README.md | 165 ++++++++++++++++++++++++++++++--------------- extras.d/README.md | 7 +- stack.sh | 2 +- 4 files changed, 131 insertions(+), 71 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index 5f33d770f8..3c08e679d9 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -5,10 +5,10 @@ Contributing to DevStack General ------- -DevStack is written in POSIX shell script. This choice was made because -it best illustrates the configuration steps that this implementation takes -on setting up and interacting with OpenStack components. DevStack specifically -uses Bash and is compatible with Bash 3. +DevStack is written in UNIX shell script. 
It uses a number of bash-isms +and so is limited to Bash (version 3 and up) and compatible shells. +Shell script was chosen because it best illustrates the steps used to +set up and interact with OpenStack components. DevStack's official repository is located on GitHub at https://github.com/openstack-dev/devstack.git. Besides the master branch that @@ -54,14 +54,14 @@ Sometimes the script needs to know the location of the DevStack install director ``TOP_DIR`` should always point there, even if the script itself is located in a subdirectory:: - # Keep track of the current devstack directory. + # Keep track of the current DevStack directory. TOP_DIR=$(cd $(dirname "$0") && pwd) Many scripts will utilize shared functions from the ``functions`` file. There are also rc files (``stackrc`` and ``openrc``) that are often included to set the primary configuration of the user environment:: - # Keep track of the current devstack directory. + # Keep track of the current DevStack directory. TOP_DIR=$(cd $(dirname "$0") && pwd) # Import common functions @@ -100,13 +100,14 @@ stackrc ------- ``stackrc`` is the global configuration file for DevStack. It is responsible for -calling ``localrc`` if it exists so configuration can be overridden by the user. +calling ``local.conf`` (or ``localrc`` if it exists) so local user configuration +is recognized. The criteria for what belongs in ``stackrc`` can be vaguely summarized as follows: -* All project respositories and branches (for historical reasons) -* Global configuration that may be referenced in ``localrc``, i.e. ``DEST``, ``DATA_DIR`` +* All project repositories and branches handled directly in ``stack.sh`` +* Global configuration that may be referenced in ``local.conf``, i.e. ``DEST``, ``DATA_DIR`` * Global service configuration like ``ENABLED_SERVICES`` * Variables used by multiple services that do not have a clear owner, i.e. 
``VOLUME_BACKING_FILE_SIZE`` (nova-volumes and cinder) or ``PUBLIC_NETWORK_NAME`` @@ -116,8 +117,9 @@ follows: not be changed for other reasons but the earlier file needs to dereference a variable set in the later file. This should be rare. -Also, variable declarations in ``stackrc`` do NOT allow overriding (the form -``FOO=${FOO:-baz}``); if they did then they can already be changed in ``localrc`` +Also, variable declarations in ``stackrc`` before ``local.conf`` is sourced +do NOT allow overriding (the form +``FOO=${FOO:-baz}``); if they did then they can already be changed in ``local.conf`` and can stay in the project file. @@ -139,7 +141,9 @@ verbose in the comments _ABOVE_ the code they pertain to. Shocco also supports Markdown formatting in the comments; use it sparingly. Specifically, ``stack.sh`` uses Markdown headers to divide the script into logical sections. -.. _shocco: http://rtomayko.github.com/shocco/ +.. _shocco: https://github.com/dtroyer/shocco/tree/rst_support + +The script used to drive shocco is tools/build_docs.sh. Exercises diff --git a/README.md b/README.md index 514786c60f..640fab65f9 100644 --- a/README.md +++ b/README.md @@ -6,35 +6,39 @@ DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud. * To describe working configurations of OpenStack (which code branches work together? what do config files look like for those branches?) * To make it easier for developers to dive into OpenStack so that they can productively contribute without having to understand every part of the system at once * To make it easy to prototype cross-project features -* To sanity-check OpenStack builds (used in gating commits to the primary repos) +* To provide an environment for the OpenStack CI testing on every commit to the projects -Read more at http://devstack.org (built from the gh-pages branch) +Read more at http://devstack.org. 
-IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you execute before you run them, as they install software and may alter your networking configuration. We strongly recommend that you run `stack.sh` in a clean and disposable vm when you are first getting started. - -# DevStack on Xenserver - -If you would like to use Xenserver as the hypervisor, please refer to the instructions in `./tools/xen/README.md`. - -# DevStack on Docker - -If you would like to use Docker as the hypervisor, please refer to the instructions in `./tools/docker/README.md`. +IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you +execute before you run them, as they install software and will alter your +networking configuration. We strongly recommend that you run `stack.sh` +in a clean and disposable vm when you are first getting started. # Versions -The devstack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release] in the DevStack repo. For example, you can do the following to create a diablo OpenStack cloud: +The DevStack master branch generally points to trunk versions of OpenStack +components. For older, stable versions, look for branches named +stable/[release] in the DevStack repo. For example, you can do the +following to create a grizzly OpenStack cloud: - git checkout stable/diablo + git checkout stable/grizzly ./stack.sh -You can also pick specific OpenStack project releases by setting the appropriate `*_BRANCH` variables in `localrc` (look in `stackrc` for the default set). Usually just before a release there will be milestone-proposed branches that need to be tested:: +You can also pick specific OpenStack project releases by setting the appropriate +`*_BRANCH` variables in the ``localrc`` section of `local.conf` (look in +`stackrc` for the default set). 
Usually just before a release there will be +milestone-proposed branches that need to be tested:: GLANCE_REPO=https://github.com/openstack/glance.git GLANCE_BRANCH=milestone-proposed # Start A Dev Cloud -Installing in a dedicated disposable vm is safer than installing on your dev machine! Plus you can pick one of the supported Linux distros for your VM. To start a dev cloud run the following NOT AS ROOT (see below for more): +Installing in a dedicated disposable VM is safer than installing on your +dev machine! Plus you can pick one of the supported Linux distros for +your VM. To start a dev cloud run the following NOT AS ROOT (see +**DevStack Execution Environment** below for more on user accounts): ./stack.sh @@ -45,7 +49,7 @@ When the script finishes executing, you should be able to access OpenStack endpo We also provide an environment file that you can use to interact with your cloud via CLI: - # source openrc file to load your environment with osapi and ec2 creds + # source openrc file to load your environment with OpenStack CLI creds . openrc # list instances nova list @@ -61,16 +65,37 @@ If the EC2 API is your cup-o-tea, you can create credentials and use euca2ools: DevStack runs rampant over the system it runs on, installing things and uninstalling other things. Running this on a system you care about is a recipe for disappointment, or worse. Alas, we're all in the virtualization business here, so run it in a VM. And take advantage of the snapshot capabilities of your hypervisor of choice to reduce testing cycle times. You might even save enough time to write one more feature before the next feature freeze... -``stack.sh`` needs to have root access for a lot of tasks, but it also needs to have not-root permissions for most of its work and for all of the OpenStack services. So ``stack.sh`` specifically does not run if you are root. This is a recent change (Oct 2013) from the previous behaviour of automatically creating a ``stack`` user. 
Automatically creating a user account is not always the right response to running as root, so that bit is now an explicit step using ``tools/create-stack-user.sh``. Run that (as root!) if you do not want to just use your normal login here, which works perfectly fine. +``stack.sh`` needs to have root access for a lot of tasks, but uses ``sudo`` +for all of those tasks. However, it needs to be not-root for most of its +work and for all of the OpenStack services. ``stack.sh`` specifically +does not run if started as root. + +This is a recent change (Oct 2013) from the previous behaviour of +automatically creating a ``stack`` user. Automatically creating +user accounts is not the right response to running as root, so +that bit is now an explicit step using ``tools/create-stack-user.sh``. +Run that (as root!) or just check it out to see what DevStack's +expectations are for the account it runs under. Many people simply +use their usual login (the default 'ubuntu' login on a UEC image +for example). # Customizing -You can override environment variables used in `stack.sh` by creating file name `localrc`. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host. +You can override environment variables used in `stack.sh` by creating file +name `local.conf` with a ``locarc`` section as shown below. It is likely +that you will need to do this to tweak your networking configuration should +you need to access your cloud from a different host. + + [[local|localrc]] + VARIABLE=value + +See the **Local Configuration** section below for more details. # Database Backend Multiple database backends are available. The available databases are defined in the lib/databases directory. 
-`mysql` is the default database, choose a different one by putting the following in `localrc`: +`mysql` is the default database, choose a different one by putting the +following in the `localrc` section: disable_service mysql enable_service postgresql @@ -81,7 +106,7 @@ Multiple database backends are available. The available databases are defined in Multiple RPC backends are available. Currently, this includes RabbitMQ (default), Qpid, and ZeroMQ. Your backend of -choice may be selected via the `localrc`. +choice may be selected via the `localrc` section. Note that selecting more than one RPC backend will result in a failure. @@ -95,9 +120,10 @@ Example (Qpid): # Apache Frontend -Apache web server is enabled for wsgi services by setting `APACHE_ENABLED_SERVICES` in your localrc. But remember to enable these services at first as above. +Apache web server is enabled for wsgi services by setting +`APACHE_ENABLED_SERVICES` in your ``localrc`` section. Remember to +enable these services at first as above. -Example: APACHE_ENABLED_SERVICES+=keystone,swift # Swift @@ -108,23 +134,23 @@ vm. When running with only one replica the account, container and object services will run directly in screen. The others services like replicator, updaters or auditor runs in background. -If you would like to enable Swift you can add this to your `localrc` : +If you would like to enable Swift you can add this to your `localrc` section: enable_service s-proxy s-object s-container s-account If you want a minimal Swift install with only Swift and Keystone you -can have this instead in your `localrc`: +can have this instead in your `localrc` section: disable_all_services enable_service key mysql s-proxy s-object s-container s-account If you only want to do some testing of a real normal swift cluster with multiple replicas you can do so by customizing the variable -`SWIFT_REPLICAS` in your `localrc` (usually to 3). +`SWIFT_REPLICAS` in your `localrc` section (usually to 3). 
# Swift S3 -If you are enabling `swift3` in `ENABLED_SERVICES` devstack will +If you are enabling `swift3` in `ENABLED_SERVICES` DevStack will install the swift3 middleware emulation. Swift will be configured to act as a S3 endpoint for Keystone so effectively replacing the `nova-objectstore`. @@ -137,7 +163,7 @@ services are started in background and managed by `swift-init` tool. Basic Setup In order to enable Neutron a single node setup, you'll need the -following settings in your `localrc` : +following settings in your `localrc` section: disable_service n-net enable_service q-svc @@ -146,12 +172,15 @@ following settings in your `localrc` : enable_service q-l3 enable_service q-meta enable_service neutron - # Optional, to enable tempest configuration as part of devstack + # Optional, to enable tempest configuration as part of DevStack enable_service tempest Then run `stack.sh` as normal. -devstack supports adding specific Neutron configuration flags to the service, Open vSwitch plugin and LinuxBridge plugin configuration files. To make use of this feature, the following variables are defined and can be configured in your `localrc` file: +DevStack supports setting specific Neutron configuration flags to the +service, Open vSwitch plugin and LinuxBridge plugin configuration files. 
+To make use of this feature, the following variables are defined and can +be configured in your `localrc` section: Variable Name Config File Section Modified ------------------------------------------------------------------------------------- @@ -160,12 +189,14 @@ devstack supports adding specific Neutron configuration flags to the service, Op Q_AGENT_EXTRA_SRV_OPTS Plugin `OVS` (for Open Vswitch) or `LINUX_BRIDGE` (for LinuxBridge) Q_SRV_EXTRA_DEFAULT_OPTS Service DEFAULT -An example of using the variables in your `localrc` is below: +An example of using the variables in your `localrc` section is below: Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_type=vxlan vxlan_udp_port=8472) Q_SRV_EXTRA_OPTS=(tenant_network_type=vxlan) -devstack also supports configuring the Neutron ML2 plugin. The ML2 plugin can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. A simple way to configure the ml2 plugin is shown below: +DevStack also supports configuring the Neutron ML2 plugin. The ML2 plugin +can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. A +simple way to configure the ml2 plugin is shown below: # VLAN configuration Q_PLUGIN=ml2 @@ -179,7 +210,9 @@ devstack also supports configuring the Neutron ML2 plugin. The ML2 plugin can ru Q_PLUGIN=ml2 Q_ML2_TENANT_NETWORK_TYPE=vxlan -The above will default in devstack to using the OVS on each compute host. To change this, set the `Q_AGENT` variable to the agent you want to run (e.g. linuxbridge). +The above will default in DevStack to using the OVS on each compute host. +To change this, set the `Q_AGENT` variable to the agent you want to run +(e.g. linuxbridge). Variable Name Notes ------------------------------------------------------------------------------------- @@ -194,13 +227,13 @@ The above will default in devstack to using the OVS on each compute host. To cha # Heat Heat is disabled by default. 
To enable it you'll need the following settings -in your `localrc` : +in your `localrc` section: enable_service heat h-api h-api-cfn h-api-cw h-eng Heat can also run in standalone mode, and be configured to orchestrate on an external OpenStack cloud. To launch only Heat in standalone mode -you'll need the following settings in your `localrc` : +you'll need the following settings in your `localrc` section: disable_all_services enable_service rabbit mysql heat h-api h-api-cfn h-api-cw h-eng @@ -215,9 +248,23 @@ If tempest has been successfully configured, a basic set of smoke tests can be r $ cd /opt/stack/tempest $ nosetests tempest/scenario/test_network_basic_ops.py +# DevStack on Xenserver + +If you would like to use Xenserver as the hypervisor, please refer to the instructions in `./tools/xen/README.md`. + +# DevStack on Docker + +If you would like to use Docker as the hypervisor, please refer to the instructions in `./tools/docker/README.md`. + # Additional Projects -DevStack has a hook mechanism to call out to a dispatch script at specific points in the execution if `stack.sh`, `unstack.sh` and `clean.sh`. This allows higher-level projects, especially those that the lower level projects have no dependency on, to be added to DevStack without modifying the scripts. Tempest is built this way as an example of how to structure the dispatch script, see `extras.d/80-tempest.sh`. See `extras.d/README.md` for more information. +DevStack has a hook mechanism to call out to a dispatch script at specific +points in the execution of `stack.sh`, `unstack.sh` and `clean.sh`. This +allows upper-layer projects, especially those that the lower layer projects +have no dependency on, to be added to DevStack without modifying the core +scripts. Tempest is built this way as an example of how to structure the +dispatch script, see `extras.d/80-tempest.sh`. See `extras.d/README.md` +for more information. 
# Multi-Node Setup @@ -232,7 +279,8 @@ You should run at least one "controller node", which should have a `stackrc` tha enable_service q-meta enable_service neutron -You likely want to change your `localrc` to run a scheduler that will balance VMs across hosts: +You likely want to change your `localrc` section to run a scheduler that +will balance VMs across hosts: SCHEDULER=nova.scheduler.simple.SimpleScheduler @@ -249,7 +297,7 @@ You can then run many compute nodes, each of which should have a `stackrc` which Cells is a new scaling option with a full spec at http://wiki.openstack.org/blueprint-nova-compute-cells. -To setup a cells environment add the following to your `localrc`: +To setup a cells environment add the following to your `localrc` section: enable_service n-cell @@ -264,32 +312,41 @@ Historically DevStack has used ``localrc`` to contain all local configuration an The new config file ``local.conf`` is an extended-INI format that introduces a new meta-section header that provides some additional information such as a phase name and destination config filename: - [[ | ]] + [[ | ]] -where is one of a set of phase names defined by ``stack.sh`` and is the project config filename. The filename is eval'ed in the stack.sh context so all environment variables are available and may be used. Using the project config file variables in the header is strongly suggested (see example of NOVA_CONF below). If the path of the config file does not exist it is skipped. +where ```` is one of a set of phase names defined by ``stack.sh`` +and ```` is the configuration filename. The filename is +eval'ed in the ``stack.sh`` context so all environment variables are +available and may be used. Using the project config file variables in +the header is strongly suggested (see the ``NOVA_CONF`` example below). +If the path of the config file does not exist it is skipped. 
The defined phases are: -* local - extracts ``localrc`` from ``local.conf`` before ``stackrc`` is sourced -* post-config - runs after the layer 2 services are configured and before they are started -* extra - runs after services are started and before any files in ``extra.d`` are executes +* **local** - extracts ``localrc`` from ``local.conf`` before ``stackrc`` is sourced +* **post-config** - runs after the layer 2 services are configured and before they are started +* **extra** - runs after services are started and before any files in ``extra.d`` are executed The file is processed strictly in sequence; meta-sections may be specified more than once but if any settings are duplicated the last to appear in the file will be used. - [[post-config|$NOVA_CONF]] - [DEFAULT] - use_syslog = True + [[post-config|$NOVA_CONF]] + [DEFAULT] + use_syslog = True - [osapi_v3] - enabled = False + [osapi_v3] + enabled = False -A specific meta-section ``local:localrc`` is used to provide a default localrc file. This allows all custom settings for DevStack to be contained in a single file. ``localrc`` is not overwritten if it exists to preserve compatability. +A specific meta-section ``local|localrc`` is used to provide a default +``localrc`` file (actually ``.localrc.auto``). This allows all custom +settings for DevStack to be contained in a single file. If ``localrc`` +exists it will be used instead to preserve backward-compatibility. - [[local|localrc]] - FIXED_RANGE=10.254.1.0/24 - ADMIN_PASSWORD=speciale - LOGFILE=$DEST/logs/stack.sh.log + [[local|localrc]] + FIXED_RANGE=10.254.1.0/24 + ADMIN_PASSWORD=speciale + LOGFILE=$DEST/logs/stack.sh.log -Note that ``Q_PLUGIN_CONF_FILE`` is unique in that it is assumed to _NOT_ start with a ``/`` (slash) character. A slash will need to be added: +Note that ``Q_PLUGIN_CONF_FILE`` is unique in that it is assumed to *NOT* +start with a ``/`` (slash) character. 
A slash will need to be added: - [[post-config|/$Q_PLUGIN_CONF_FILE]] + [[post-config|/$Q_PLUGIN_CONF_FILE]] diff --git a/extras.d/README.md b/extras.d/README.md index 591e438b02..88e4265ced 100644 --- a/extras.d/README.md +++ b/extras.d/README.md @@ -10,12 +10,11 @@ that end with `.sh`. To control the order that the scripts are sourced their names start with a two digit sequence number. DevStack reserves the sequence numbers 00 through 09 and 90 through 99 for its own use. -The scripts are sourced at each hook point so they should not declare anything -at the top level that would cause a problem, specifically, functions. This does -allow the entire `stack.sh` variable space to be available. The scripts are +The scripts are sourced at the beginning of each script that calls them. The +entire `stack.sh` variable space is available. The scripts are sourced with one or more arguments, the first of which defines the hook phase: -arg 1: source | stack | unstack | clean + source | stack | unstack | clean source: always called first in any of the scripts, used to set the initial defaults in a lib/* script or similar diff --git a/stack.sh b/stack.sh index aa0efea487..b3380a8775 100755 --- a/stack.sh +++ b/stack.sh @@ -53,7 +53,7 @@ if [[ -r $TOP_DIR/local.conf ]]; then if [[ -r $TOP_DIR/localrc ]]; then warn $LINENO "localrc and local.conf:[[local]] both exist, using localrc" else - echo "# Generated file, do not exit" >$TOP_DIR/.localrc.auto + echo "# Generated file, do not edit" >$TOP_DIR/.localrc.auto get_meta_section $TOP_DIR/local.conf local $lfile >>$TOP_DIR/.localrc.auto fi fi From 2e159460126febc8be6d65477cc94ef6ef159649 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 21 Oct 2013 13:06:11 -0700 Subject: [PATCH 0189/4438] Allow starting nova-compute manually This breaks out the code that starts nova-compute into a separate function. 
This will be used for upgrade testing so that we can arrange for a nova-compute running on a different version of the code to be running alongside the rest of the stack. Change-Id: I88687cefdac7fa4a3c45789461a95fd8d061aba6 --- lib/nova | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/lib/nova b/lib/nova index 5ff5099c6d..09c6a50dd5 100644 --- a/lib/nova +++ b/lib/nova @@ -610,20 +610,10 @@ function start_nova_api() { fi } -# start_nova() - Start running processes, including screen -function start_nova() { +# start_nova_compute() - Start the compute process +function start_nova_compute() { NOVA_CONF_BOTTOM=$NOVA_CONF - # ``screen_it`` checks ``is_service_enabled``, it is not needed here - screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor" - - if is_service_enabled n-cell; then - NOVA_CONF_BOTTOM=$NOVA_CELLS_CONF - screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CELLS_CONF" - screen_it n-cell-region "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CONF" - screen_it n-cell-child "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CELLS_CONF" - fi - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then # The group **$LIBVIRT_GROUP** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. 
@@ -639,6 +629,22 @@ function start_nova() { fi screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM" fi +} + +# start_nova() - Start running processes, including screen +function start_nova_rest() { + NOVA_CONF_BOTTOM=$NOVA_CONF + + # ``screen_it`` checks ``is_service_enabled``, it is not needed here + screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor" + + if is_service_enabled n-cell; then + NOVA_CONF_BOTTOM=$NOVA_CELLS_CONF + screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CELLS_CONF" + screen_it n-cell-region "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CONF" + screen_it n-cell-child "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CELLS_CONF" + fi + screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network --config-file $NOVA_CONF_BOTTOM" screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $NOVA_CONF_BOTTOM" @@ -655,6 +661,11 @@ function start_nova() { screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore" } +function start_nova() { + start_nova_compute + start_nova_rest +} + # stop_nova() - Stop running processes (non-screen) function stop_nova() { # Kill the nova screen windows From a45a0a0276f542ef5d624067e98dfa2de830fd84 Mon Sep 17 00:00:00 2001 From: Denis Egorenko Date: Tue, 1 Oct 2013 16:03:39 +0000 Subject: [PATCH 0190/4438] Added Savanna Project Added services Savanna, Savanna Dashboard, Savanna python client. 
Implements blueprint devstack-savanna-support Implements blueprint devstack-integration Change-Id: I8725f59a0cc9aef4817988470313136c56711cf1 --- exercises/savanna.sh | 43 +++++++++++++++++++ extras.d/70-savanna.sh | 31 ++++++++++++++ lib/savanna | 97 ++++++++++++++++++++++++++++++++++++++++++ lib/savanna-dashboard | 70 ++++++++++++++++++++++++++++++ 4 files changed, 241 insertions(+) create mode 100755 exercises/savanna.sh create mode 100644 extras.d/70-savanna.sh create mode 100644 lib/savanna create mode 100644 lib/savanna-dashboard diff --git a/exercises/savanna.sh b/exercises/savanna.sh new file mode 100755 index 0000000000..fc3f9760e5 --- /dev/null +++ b/exercises/savanna.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +# **savanna.sh** + +# Sanity check that Savanna started if enabled + +echo "*********************************************************************" +echo "Begin DevStack Exercise: $0" +echo "*********************************************************************" + +# This script exits on an error so that errors don't compound and you see +# only the first error that occurred. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. +set -o xtrace + + +# Settings +# ======== + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import exercise configuration +source $TOP_DIR/exerciserc + +is_service_enabled savanna || exit 55 + +curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Savanna API not functioning!" 
+ +set +o xtrace +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh new file mode 100644 index 0000000000..f6881cc4f6 --- /dev/null +++ b/extras.d/70-savanna.sh @@ -0,0 +1,31 @@ +# savanna.sh - DevStack extras script to install Savanna + +if is_service_enabled savanna; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/savanna + source $TOP_DIR/lib/savanna-dashboard + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing Savanna" + install_savanna + if is_service_enabled horizon; then + install_savanna_dashboard + fi + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + echo_summary "Configuring Savanna" + configure_savanna + if is_service_enabled horizon; then + configure_savanna_dashboard + fi + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + echo_summary "Initializing Savanna" + start_savanna + fi + + if [[ "$1" == "unstack" ]]; then + stop_savanna + if is_service_enabled horizon; then + cleanup_savanna_dashboard + fi + fi +fi diff --git a/lib/savanna b/lib/savanna new file mode 100644 index 0000000000..e9dbe72643 --- /dev/null +++ b/lib/savanna @@ -0,0 +1,97 @@ +# lib/savanna + +# Dependencies: +# ``functions`` file +# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# ``ADMIN_{TENANT_NAME|PASSWORD}`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# install_savanna +# configure_savanna +# start_savanna +# stop_savanna + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default repos +SAVANNA_REPO=${SAVANNA_REPO:-${GIT_BASE}/openstack/savanna.git} +SAVANNA_BRANCH=${SAVANNA_BRANCH:-master} + +# Set up default directories +SAVANNA_DIR=$DEST/savanna 
+SAVANNA_CONF_DIR=${SAVANNA_CONF_DIR:-/etc/savanna} +SAVANNA_CONF_FILE=savanna.conf +ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin} +ADMIN_NAME=${ADMIN_NAME:-admin} +ADMIN_PASSWORD=${ADMIN_PASSWORD:-nova} +SAVANNA_DEBUG=${SAVANNA_DEBUG:-True} + +# Support entry points installation of console scripts +if [[ -d $SAVANNA_DIR/bin ]]; then + SAVANNA_BIN_DIR=$SAVANNA_DIR/bin +else + SAVANNA_BIN_DIR=$(get_python_exec_prefix) +fi + +# Functions +# --------- + +# configure_savanna() - Set config files, create data dirs, etc +function configure_savanna() { + + if [[ ! -d $SAVANNA_CONF_DIR ]]; then + sudo mkdir -p $SAVANNA_CONF_DIR + fi + sudo chown $STACK_USER $SAVANNA_CONF_DIR + + # Copy over savanna configuration file and configure common parameters. + cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE + + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_password $ADMIN_PASSWORD + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_username $ADMIN_NAME + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $ADMIN_TENANT_NAME + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG + + recreate_database savanna utf8 + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database sql_connection `database_connection_url savanna` + inicomment $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database connection + + if is_service_enabled neutron; then + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_neutron true + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_floating_ips true + fi + + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG +} + +# install_savanna() - Collect source and prepare +function install_savanna() { + git_clone $SAVANNA_REPO $SAVANNA_DIR $SAVANNA_BRANCH + setup_develop $SAVANNA_DIR +} + +# start_savanna() - Start running processes, including screen +function start_savanna() { + screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api 
--config-file $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE" +} + +# stop_savanna() - Stop running processes +function stop_savanna() { + # Kill the Savanna screen windows + screen -S $SCREEN_NAME -p savanna -X kill +} + + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard new file mode 100644 index 0000000000..9562db4e1c --- /dev/null +++ b/lib/savanna-dashboard @@ -0,0 +1,70 @@ +# lib/savanna-dashboard + +# Dependencies: +# ``functions`` file +# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# ``SERVICE_HOST + +# ``stack.sh`` calls the entry points in this order: +# +# install_savanna_dashboard +# configure_savanna_dashboard +# cleanup_savanna_dashboard + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/horizon + +# Defaults +# -------- + +# Set up default repos +SAVANNA_DASHBOARD_REPO=${SAVANNA_DASHBOARD_REPO:-${GIT_BASE}/openstack/savanna-dashboard.git} +SAVANNA_DASHBOARD_BRANCH=${SAVANNA_DASHBOARD_BRANCH:-master} + +SAVANNA_PYTHONCLIENT_REPO=${SAVANNA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-savannaclient.git} +SAVANNA_PYTHONCLIENT_BRANCH=${SAVANNA_PYTHONCLIENT_BRANCH:-master} + +# Set up default directories +SAVANNA_DASHBOARD_DIR=$DEST/savanna_dashboard +SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient + +# Functions +# --------- + +function configure_savanna_dashboard() { + + echo -e "SAVANNA_URL = \"http://$SERVICE_HOST:8386/v1.1\"\nAUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py + echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)\nINSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py + + if is_service_enabled neutron; then + echo -e "SAVANNA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py + fi +} + +# install_savanna_dashboard() - Collect source and prepare +function install_savanna_dashboard() { + 
install_python_savannaclient + git_clone $SAVANNA_DASHBOARD_REPO $SAVANNA_DASHBOARD_DIR $SAVANNA_DASHBOARD_BRANCH + setup_develop $SAVANNA_DASHBOARD_DIR +} + +function install_python_savannaclient() { + git_clone $SAVANNA_PYTHONCLIENT_REPO $SAVANNA_PYTHONCLIENT_DIR $SAVANNA_PYTHONCLIENT_BRANCH + setup_develop $SAVANNA_PYTHONCLIENT_DIR +} + +# Cleanup file settings.py from Savanna +function cleanup_savanna_dashboard() { + sed -i '/savanna/d' $HORIZON_DIR/openstack_dashboard/settings.py +} + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: + From e095daa73267cedbd3cc7b68f517bbe0624f770e Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 29 Aug 2013 15:45:20 -0400 Subject: [PATCH 0191/4438] Make nova use fatal_deprecations=true We should not be using deprecated config options here, so lets set fatal_deprecations=True to make sure. Stop using deprecated LibvirtHybridOVSBridgeDriver Change-Id: I0a43780270d092a42ede6c0667343f0d02b3aa67 --- lib/neutron_plugins/ovs_base | 8 +------- lib/nova | 1 + 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 2666d8e8ba..1214f3bcbd 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -73,13 +73,7 @@ function _neutron_ovs_base_configure_l3_agent() { } function _neutron_ovs_base_configure_nova_vif_driver() { - # The hybrid VIF driver needs to be specified when Neutron Security Group - # is enabled (until vif_security attributes are supported in VIF extension) - if [[ "$Q_USE_SECGROUP" == "True" ]]; then - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} - else - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} - fi + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} } # Restore xtrace diff --git a/lib/nova b/lib/nova index 5ff5099c6d..615683437d 100644 --- a/lib/nova +++ b/lib/nova @@ -377,6 
+377,7 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT ec2_workers "4" iniset $NOVA_CONF DEFAULT metadata_workers "4" iniset $NOVA_CONF DEFAULT sql_connection `database_connection_url nova` + iniset $NOVA_CONF DEFAULT fatal_deprecations "True" iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF osapi_v3 enabled "True" From dc30bd3eb457aaea66451621695cddfa8213a169 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 07:30:47 -0400 Subject: [PATCH 0192/4438] exit cleanup in functions we should always use die instead of exit so that we know why we failed. Also remove instances where exit is called after die, as that is a noop. Change-Id: I8e08cce63d35c503c36ff1e09805f3db427d082d --- functions | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/functions b/functions index d969677fc5..0aef47ef78 100644 --- a/functions +++ b/functions @@ -580,7 +580,8 @@ function git_clone { if echo $GIT_REF | egrep -q "^refs"; then # If our branch name is a gerrit style refs/changes/... if [[ ! -d $GIT_DEST ]]; then - [[ "$ERROR_ON_CLONE" = "True" ]] && exit 1 + [[ "$ERROR_ON_CLONE" = "True" ]] && \ + die $LINENO "Cloning not allowed in this configuration" git clone $GIT_REMOTE $GIT_DEST fi cd $GIT_DEST @@ -588,7 +589,8 @@ function git_clone { else # do a full clone only if the directory doesn't exist if [[ ! 
-d $GIT_DEST ]]; then - [[ "$ERROR_ON_CLONE" = "True" ]] && exit 1 + [[ "$ERROR_ON_CLONE" = "True" ]] && \ + die $LINENO "Cloning not allowed in this configuration" git clone $GIT_REMOTE $GIT_DEST cd $GIT_DEST # This checkout syntax works for both branches and tags @@ -612,8 +614,7 @@ function git_clone { elif [[ -n "`git show-ref refs/remotes/origin/$GIT_REF`" ]]; then git_update_remote_branch $GIT_REF else - echo $GIT_REF is neither branch nor tag - exit 1 + die $LINENO "$GIT_REF is neither branch nor tag" fi fi @@ -1562,7 +1563,6 @@ function _ping_check_novanet() { else die $LINENO "[Fail] Could ping server" fi - exit 1 fi } @@ -1575,7 +1575,6 @@ function get_instance_ip(){ if [[ $ip = "" ]];then echo "$nova_result" die $LINENO "[Fail] Coudn't get ipaddress of VM" - exit 1 fi echo $ip } From 6832272a1816238d6671865771b92691dc65a205 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 21 Oct 2013 18:11:40 -0400 Subject: [PATCH 0193/4438] add lib/stackforge to let us test wsme / pecan from git wsme and pecan libraries have migrated to stackforge for development. If we support them in devstack, we can use their git version instead of the release version, which ensures that they won't break the rest of OpenStack when they cut a new release. This is similar to how oslo testing works. Long term we probably want a more generic mechanism to handle this, but for now, this should get us rolling, and get them gating. Change-Id: Icf3475f433081c7c625864107d7e118e214396e1 --- lib/stackforge | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 4 +++ stackrc | 10 ++++++++ 3 files changed, 81 insertions(+) create mode 100644 lib/stackforge diff --git a/lib/stackforge b/lib/stackforge new file mode 100644 index 0000000000..4b79de0c94 --- /dev/null +++ b/lib/stackforge @@ -0,0 +1,67 @@ +# lib/stackforge +# +# Functions to install stackforge libraries that we depend on so +# that we can try their git versions during devstack gate. 
+# +# This is appropriate for python libraries that release to pypi and are +# expected to be used beyond OpenStack like, but are requirements +# for core services in global-requirements. +# * wsme +# * pecan +# +# This is not appropriate for stackforge projects which are early stage +# OpenStack tools + +# Dependencies: +# ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# install_stackforge + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- +WSME_DIR=$DEST/wsme +PECAN_DIR=$DEST/pecan + +# Entry Points +# ------------ + +# install_stackforge() - Collect source and prepare +function install_stackforge() { + # TODO(sdague): remove this once we get to Icehouse, this just makes + # for a smoother transition of existing users. + cleanup_stackforge + + git_clone $WSME_REPO $WSME_DIR $WSME_BRANCH + setup_develop $WSME_DIR + + git_clone $PECAN_REPO $PECAN_DIR $PECAN_BRANCH + setup_develop $PECAN_DIR +} + +# cleanup_stackforge() - purge possibly old versions of stackforge libraries +function cleanup_stackforge() { + # this means we've got an old version installed, lets get rid of it + # otherwise python hates itself + for lib in wsme pecan; do + if ! python -c "import $lib" 2>/dev/null; then + echo "Found old $lib... 
removing to ensure consistency" + local PIP_CMD=$(get_pip_command) + pip_install $lib + sudo $PIP_CMD uninstall -y $lib + fi + done +} + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index aa0efea487..df5cd4cc47 100755 --- a/stack.sh +++ b/stack.sh @@ -299,6 +299,7 @@ source $TOP_DIR/lib/apache source $TOP_DIR/lib/tls source $TOP_DIR/lib/infra source $TOP_DIR/lib/oslo +source $TOP_DIR/lib/stackforge source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance @@ -627,6 +628,9 @@ install_infra # Install oslo libraries that have graduated install_oslo +# Install stackforge libraries for testing +install_stackforge + # Install clients libraries install_keystoneclient install_glanceclient diff --git a/stackrc b/stackrc index 3f740b5678..b9d636a2b3 100644 --- a/stackrc +++ b/stackrc @@ -193,6 +193,16 @@ TROVE_BRANCH=${TROVE_BRANCH:-master} TROVECLIENT_REPO=${TROVECLIENT_REPO:-${GIT_BASE}/openstack/python-troveclient.git} TROVECLIENT_BRANCH=${TROVECLIENT_BRANCH:-master} +# stackforge libraries that are used by OpenStack core services +# wsme +WSME_REPO=${WSME_REPO:-${GIT_BASE}/stackforge/wsme.git} +WSME_BRANCH=${WSME_BRANCH:-master} + +# pecan +PECAN_REPO=${PECAN_REPO:-${GIT_BASE}/stackforge/pecan.git} +PECAN_BRANCH=${PECAN_BRANCH:-master} + + # Nova hypervisor configuration. We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can # also install an **LXC**, **OpenVZ** or **XenAPI** based system. If xenserver-core From 537d4025c511d9b162726bb5c972da72028573ed Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 07:43:22 -0400 Subject: [PATCH 0194/4438] whitespace cleanup on functions & lib/config fix some of the bash8 issues found in functions and lib/config, part of the long march towards fixing all the bash8 issues. 
Change-Id: Ia131f64870acb0f9d196fe1a9a45d633abb6fc4d --- functions | 50 +++++++++++++++++++++++++------------------------- lib/config | 14 +++++++------- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/functions b/functions index 4afebe020e..0ab2afcafa 100644 --- a/functions +++ b/functions @@ -1372,9 +1372,9 @@ function upload_image() { IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME="${IMAGE_FNAME%.xen-raw.tgz}" glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ --name "$IMAGE_NAME" --is-public=True \ --container-format=tgz --disk-format=raw \ --property vm_mode=xen < "${IMAGE}" @@ -1397,11 +1397,11 @@ function upload_image() { mkdir "$xdir" tar -zxf $FILES/$IMAGE_FNAME -C "$xdir" KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) + [ -f "$f" ] && echo "$f" && break; done; true) RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) + [ -f "$f" ] && echo "$f" && break; done; true) IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) + [ -f "$f" ] && echo "$f" && break; done; true) if [[ -z "$IMAGE_NAME" ]]; then IMAGE_NAME=$(basename "$IMAGE" ".img") fi @@ -1692,23 +1692,23 @@ function check_path_perm_sanity() { # # _vercmp_r sep ver1 ver2 function _vercmp_r { - typeset sep - typeset -a ver1=() ver2=() - sep=$1; shift - ver1=("${@:1:sep}") - ver2=("${@:sep+1}") + typeset sep + typeset -a ver1=() ver2=() + sep=$1; shift + ver1=("${@:1:sep}") + ver2=("${@:sep+1}") - if ((ver1 > ver2)); then - echo 1; return 0 - elif ((ver2 > ver1)); then - echo -1; return 0 - fi + if ((ver1 > ver2)); then + echo 1; return 0 + elif ((ver2 > ver1)); then + echo -1; return 0 + fi - if ((sep <= 1)); then - echo 0; return 0 - fi + if ((sep <= 1)); then + echo 0; return 0 + fi - 
_vercmp_r $((sep-1)) "${ver1[@]:1}" "${ver2[@]:1}" + _vercmp_r $((sep-1)) "${ver1[@]:1}" "${ver2[@]:1}" } @@ -1730,13 +1730,13 @@ function _vercmp_r { # # vercmp_numbers ver1 ver2 vercmp_numbers() { - typeset v1=$1 v2=$2 sep - typeset -a ver1 ver2 + typeset v1=$1 v2=$2 sep + typeset -a ver1 ver2 - IFS=. read -ra ver1 <<< "$v1" - IFS=. read -ra ver2 <<< "$v2" + IFS=. read -ra ver1 <<< "$v1" + IFS=. read -ra ver2 <<< "$v2" - _vercmp_r "${#ver1[@]}" "${ver1[@]}" "${ver2[@]}" + _vercmp_r "${#ver1[@]}" "${ver1[@]}" "${ver2[@]}" } diff --git a/lib/config b/lib/config index 6f686e9b5d..91cefe48cc 100644 --- a/lib/config +++ b/lib/config @@ -10,7 +10,7 @@ # [[group-name|file-name]] # # group-name refers to the group of configuration file changes to be processed -# at a particular time. These are called phases in ``stack.sh`` but +# at a particular time. These are called phases in ``stack.sh`` but # group here as these functions are not DevStack-specific. # # file-name is the destination of the config file @@ -64,12 +64,12 @@ function get_meta_section_files() { [[ -r $file ]] || return 0 $CONFIG_AWK_CMD -v matchgroup=$matchgroup ' - /^\[\[.+\|.*\]\]/ { - gsub("[][]", "", $1); - split($1, a, "|"); - if (a[1] == matchgroup) - print a[2] - } + /^\[\[.+\|.*\]\]/ { + gsub("[][]", "", $1); + split($1, a, "|"); + if (a[1] == matchgroup) + print a[2] + } ' $file } From 3bdb922c4054a55f03b3db94721997e52415e76d Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 08:36:16 -0400 Subject: [PATCH 0195/4438] fix bash8 indent problems in lib/neutron and friends Change-Id: Ia83ce84b792494800fbfe7baa6423c8de9260014 --- lib/neutron | 26 +++++++++++++------------- lib/neutron_plugins/midonet | 4 ++-- lib/neutron_plugins/nec | 18 +++++++++--------- lib/neutron_plugins/nicira | 8 ++++---- 4 files changed, 28 insertions(+), 28 deletions(-) diff --git a/lib/neutron b/lib/neutron index 778717d7a9..44fb9e1005 100644 --- a/lib/neutron +++ b/lib/neutron @@ -79,8 +79,8 @@ 
NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} # Support entry points installation of console scripts if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then NEUTRON_BIN_DIR=$NEUTRON_DIR/bin - else -NEUTRON_BIN_DIR=$(get_python_exec_prefix) +else + NEUTRON_BIN_DIR=$(get_python_exec_prefix) fi NEUTRON_CONF_DIR=/etc/neutron @@ -373,7 +373,7 @@ function create_neutron_initial_network() { iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID fi fi - fi + fi } # init_neutron() - Initialize databases, etc. @@ -404,7 +404,7 @@ function install_neutron_agent_packages() { fi if is_service_enabled q-lbaas; then - neutron_agent_lbaas_install_agent_packages + neutron_agent_lbaas_install_agent_packages fi } @@ -414,13 +414,13 @@ function start_neutron_service_and_check() { local cfg_file local CFG_FILE_OPTIONS="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do - CFG_FILE_OPTIONS+=" --config-file /$cfg_file" + CFG_FILE_OPTIONS+=" --config-file /$cfg_file" done # Start the Neutron service screen_it q-svc "cd $NEUTRON_DIR && python $NEUTRON_BIN_DIR/neutron-server $CFG_FILE_OPTIONS" echo "Waiting for Neutron to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
wget --no-proxy -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then - die $LINENO "Neutron did not start" + die $LINENO "Neutron did not start" fi } @@ -712,9 +712,9 @@ function _neutron_setup_rootwrap() { # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` # location moved in newer versions, prefer new location if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then - sudo cp -p $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE + sudo cp -p $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE else - sudo cp -p $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE + sudo cp -p $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE fi sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE sudo chown root:root $Q_RR_CONF_FILE @@ -848,11 +848,11 @@ function _ssh_check_neutron() { # please refer to ``lib/neutron_thirdparty/README.md`` for details NEUTRON_THIRD_PARTIES="" for f in $TOP_DIR/lib/neutron_thirdparty/*; do - third_party=$(basename $f) - if is_service_enabled $third_party; then - source $TOP_DIR/lib/neutron_thirdparty/$third_party - NEUTRON_THIRD_PARTIES="$NEUTRON_THIRD_PARTIES,$third_party" - fi + third_party=$(basename $f) + if is_service_enabled $third_party; then + source $TOP_DIR/lib/neutron_thirdparty/$third_party + NEUTRON_THIRD_PARTIES="$NEUTRON_THIRD_PARTIES,$third_party" + fi done function _neutron_third_party_do() { diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index 074f847330..cf45a9d11d 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -52,11 +52,11 @@ function neutron_plugin_configure_dhcp_agent() { } function neutron_plugin_configure_l3_agent() { - die $LINENO "q-l3 must not be executed with MidoNet plugin!" + die $LINENO "q-l3 must not be executed with MidoNet plugin!" } function neutron_plugin_configure_plugin_agent() { - die $LINENO "q-agt must not be executed with MidoNet plugin!" 
+ die $LINENO "q-agt must not be executed with MidoNet plugin!" } function neutron_plugin_configure_service() { diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec index 79d41dbf77..3806c32c75 100644 --- a/lib/neutron_plugins/nec +++ b/lib/neutron_plugins/nec @@ -101,15 +101,15 @@ function _neutron_setup_ovs_tunnels() { local id=0 GRE_LOCAL_IP=${GRE_LOCAL_IP:-$HOST_IP} if [ -n "$GRE_REMOTE_IPS" ]; then - for ip in ${GRE_REMOTE_IPS//:/ } - do - if [[ "$ip" == "$GRE_LOCAL_IP" ]]; then - continue - fi - sudo ovs-vsctl --no-wait add-port $bridge gre$id -- \ - set Interface gre$id type=gre options:remote_ip=$ip - id=`expr $id + 1` - done + for ip in ${GRE_REMOTE_IPS//:/ } + do + if [[ "$ip" == "$GRE_LOCAL_IP" ]]; then + continue + fi + sudo ovs-vsctl --no-wait add-port $bridge gre$id -- \ + set Interface gre$id type=gre options:remote_ip=$ip + id=`expr $id + 1` + done fi } diff --git a/lib/neutron_plugins/nicira b/lib/neutron_plugins/nicira index 082c84674d..7c99b692d6 100644 --- a/lib/neutron_plugins/nicira +++ b/lib/neutron_plugins/nicira @@ -58,13 +58,13 @@ function neutron_plugin_configure_dhcp_agent() { } function neutron_plugin_configure_l3_agent() { - # Nicira plugin does not run L3 agent - die $LINENO "q-l3 should must not be executed with Nicira plugin!" + # Nicira plugin does not run L3 agent + die $LINENO "q-l3 should must not be executed with Nicira plugin!" } function neutron_plugin_configure_plugin_agent() { - # Nicira plugin does not run L2 agent - die $LINENO "q-agt must not be executed with Nicira plugin!" + # Nicira plugin does not run L2 agent + die $LINENO "q-agt must not be executed with Nicira plugin!" 
} function neutron_plugin_configure_service() { From 101b4248428b4c3d7757e15ff4e19d3b4f85a51f Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 08:47:11 -0400 Subject: [PATCH 0196/4438] fix whitespace in the rest of lib/* this brings this in line with bash8 checker Change-Id: Ib34a2292dd5bc259069457461041ec9cd4fd2957 --- lib/baremetal | 100 +++++++++++++------------- lib/glance | 2 +- lib/ironic | 2 +- lib/keystone | 2 +- lib/neutron_thirdparty/trema | 4 +- lib/nova | 41 ++++++----- lib/nova_plugins/hypervisor-baremetal | 4 +- lib/nova_plugins/hypervisor-libvirt | 8 +-- lib/rpc_backend | 6 +- lib/swift | 64 ++++++++--------- lib/tempest | 20 +++--- lib/trove | 15 ++-- 12 files changed, 134 insertions(+), 134 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index f4d8589628..141c28d15f 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -256,19 +256,19 @@ function upload_baremetal_deploy() { # load them into glance BM_DEPLOY_KERNEL_ID=$(glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name $BM_DEPLOY_KERNEL \ - --is-public True --disk-format=aki \ - < $TOP_DIR/files/$BM_DEPLOY_KERNEL | grep ' id ' | get_field 2) + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $BM_DEPLOY_KERNEL \ + --is-public True --disk-format=aki \ + < $TOP_DIR/files/$BM_DEPLOY_KERNEL | grep ' id ' | get_field 2) BM_DEPLOY_RAMDISK_ID=$(glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name $BM_DEPLOY_RAMDISK \ - --is-public True --disk-format=ari \ - < $TOP_DIR/files/$BM_DEPLOY_RAMDISK | grep ' id ' | get_field 2) + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $BM_DEPLOY_RAMDISK \ + --is-public True --disk-format=ari \ + < $TOP_DIR/files/$BM_DEPLOY_RAMDISK | grep ' id ' | get_field 2) } # create a basic baremetal flavor, associated with deploy kernel & ramdisk @@ -278,11 +278,11 @@ 
function create_baremetal_flavor() { aki=$1 ari=$2 nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \ - $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU + $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU nova flavor-key $BM_FLAVOR_NAME set \ - "cpu_arch"="$BM_FLAVOR_ARCH" \ - "baremetal:deploy_kernel_id"="$aki" \ - "baremetal:deploy_ramdisk_id"="$ari" + "cpu_arch"="$BM_FLAVOR_ARCH" \ + "baremetal:deploy_kernel_id"="$aki" \ + "baremetal:deploy_ramdisk_id"="$ari" } @@ -311,19 +311,19 @@ function extract_and_upload_k_and_r_from_image() { # load them into glance KERNEL_ID=$(glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name $image_name-kernel \ - --is-public True --disk-format=aki \ - < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2) + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $image_name-kernel \ + --is-public True --disk-format=aki \ + < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2) RAMDISK_ID=$(glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name $image_name-initrd \ - --is-public True --disk-format=ari \ - < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2) + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $image_name-initrd \ + --is-public True --disk-format=ari \ + < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2) } @@ -365,11 +365,11 @@ function upload_baremetal_image() { mkdir "$xdir" tar -zxf $FILES/$IMAGE_FNAME -C "$xdir" KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) + [ -f "$f" ] && echo "$f" && break; done; true) RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) + [ -f "$f" ] && echo "$f" && break; done; true) IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do - [ -f "$f" ] && echo "$f" && 
break; done; true) + [ -f "$f" ] && echo "$f" && break; done; true) if [[ -z "$IMAGE_NAME" ]]; then IMAGE_NAME=$(basename "$IMAGE" ".img") fi @@ -403,19 +403,19 @@ function upload_baremetal_image() { --container-format ari \ --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) else - # TODO(deva): add support for other image types - return + # TODO(deva): add support for other image types + return fi glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name "${IMAGE_NAME%.img}" --is-public True \ - --container-format $CONTAINER_FORMAT \ - --disk-format $DISK_FORMAT \ - ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \ - ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name "${IMAGE_NAME%.img}" --is-public True \ + --container-format $CONTAINER_FORMAT \ + --disk-format $DISK_FORMAT \ + ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \ + ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" # override DEFAULT_IMAGE_NAME so that tempest can find the image # that we just uploaded in glance @@ -439,15 +439,15 @@ function add_baremetal_node() { mac_2=${2:-$BM_SECOND_MAC} id=$(nova baremetal-node-create \ - --pm_address="$BM_PM_ADDR" \ - --pm_user="$BM_PM_USER" \ - --pm_password="$BM_PM_PASS" \ - "$BM_HOSTNAME" \ - "$BM_FLAVOR_CPU" \ - "$BM_FLAVOR_RAM" \ - "$BM_FLAVOR_ROOT_DISK" \ - "$mac_1" \ - | grep ' id ' | get_field 2 ) + --pm_address="$BM_PM_ADDR" \ + --pm_user="$BM_PM_USER" \ + --pm_password="$BM_PM_PASS" \ + "$BM_HOSTNAME" \ + "$BM_FLAVOR_CPU" \ + "$BM_FLAVOR_RAM" \ + "$BM_FLAVOR_ROOT_DISK" \ + "$mac_1" \ + | grep ' id ' | get_field 2 ) [ $? 
-eq 0 ] || [ "$id" ] || die $LINENO "Error adding baremetal node" if [ -n "$mac_2" ]; then id2=$(nova baremetal-interface-add "$id" "$mac_2" ) diff --git a/lib/glance b/lib/glance index c6f11d06da..75e3dd053d 100644 --- a/lib/glance +++ b/lib/glance @@ -194,7 +194,7 @@ function start_glance() { screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then - die $LINENO "g-api did not start" + die $LINENO "g-api did not start" fi } diff --git a/lib/ironic b/lib/ironic index 89d0edc1a4..649c1c2cd6 100644 --- a/lib/ironic +++ b/lib/ironic @@ -203,7 +203,7 @@ function start_ironic_api() { screen_it ir-api "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE" echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then - die $LINENO "ir-api did not start" + die $LINENO "ir-api did not start" fi } diff --git a/lib/keystone b/lib/keystone index c93a4367d2..beddb1cd75 100755 --- a/lib/keystone +++ b/lib/keystone @@ -373,7 +373,7 @@ function start_keystone() { echo "Waiting for keystone to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
curl --noproxy '*' -s http://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then - die $LINENO "keystone did not start" + die $LINENO "keystone did not start" fi # Start proxies if enabled diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema index 09dc46bd83..5b5c4590c3 100644 --- a/lib/neutron_thirdparty/trema +++ b/lib/neutron_thirdparty/trema @@ -66,8 +66,8 @@ function init_trema() { cp $TREMA_SS_DIR/sliceable_switch_null.conf $TREMA_SS_CONFIG sed -i -e "s|^\$apps_dir.*$|\$apps_dir = \"$TREMA_DIR/apps\"|" \ - -e "s|^\$db_dir.*$|\$db_dir = \"$TREMA_SS_DB_DIR\"|" \ - $TREMA_SS_CONFIG + -e "s|^\$db_dir.*$|\$db_dir = \"$TREMA_SS_DB_DIR\"|" \ + $TREMA_SS_CONFIG } function gem_install() { diff --git a/lib/nova b/lib/nova index 09332cf941..809f8e5896 100644 --- a/lib/nova +++ b/lib/nova @@ -465,27 +465,27 @@ function create_nova_conf() { fi if is_service_enabled n-novnc || is_service_enabled n-xvnc; then - # Address on which instance vncservers will listen on compute hosts. - # For multi-host, this should be the management ip of the compute host. - VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} - VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} - iniset $NOVA_CONF DEFAULT vnc_enabled true - iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" - iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + # Address on which instance vncservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. 
+ VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} + VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} + iniset $NOVA_CONF DEFAULT vnc_enabled true + iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" + iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" else - iniset $NOVA_CONF DEFAULT vnc_enabled false + iniset $NOVA_CONF DEFAULT vnc_enabled false fi if is_service_enabled n-spice; then - # Address on which instance spiceservers will listen on compute hosts. - # For multi-host, this should be the management ip of the compute host. - SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1} - SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1} - iniset $NOVA_CONF spice enabled true - iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN" - iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" + # Address on which instance spiceservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. + SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1} + SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1} + iniset $NOVA_CONF spice enabled true + iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN" + iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" else - iniset $NOVA_CONF spice enabled false + iniset $NOVA_CONF spice enabled false fi iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" @@ -602,7 +602,7 @@ function start_nova_api() { screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api" echo "Waiting for nova-api to start..." if ! 
wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then - die $LINENO "nova-api did not start" + die $LINENO "nova-api did not start" fi # Start proxies if enabled @@ -620,10 +620,9 @@ function start_nova_compute() { # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM'" elif [[ "$VIRT_DRIVER" = 'fake' ]]; then - for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE` - do - screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM" - done + for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do + screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM" + done else if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then start_nova_hypervisor diff --git a/lib/nova_plugins/hypervisor-baremetal b/lib/nova_plugins/hypervisor-baremetal index 4e7c1734d1..660c977bde 100644 --- a/lib/nova_plugins/hypervisor-baremetal +++ b/lib/nova_plugins/hypervisor-baremetal @@ -61,8 +61,8 @@ function configure_nova_hypervisor() { # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``. 
for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do - # Attempt to convert flags to options - iniset $NOVA_CONF baremetal ${I/=/ } + # Attempt to convert flags to options + iniset $NOVA_CONF baremetal ${I/=/ } done } diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index caf0296ad2..6fae0b17d0 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -82,10 +82,10 @@ EOF" sudo mkdir -p $rules_dir sudo bash -c "cat < $rules_dir/50-libvirt-$STACK_USER.rules polkit.addRule(function(action, subject) { - if (action.id == 'org.libvirt.unix.manage' && - subject.user == '"$STACK_USER"') { - return polkit.Result.YES; - } + if (action.id == 'org.libvirt.unix.manage' && + subject.user == '"$STACK_USER"') { + return polkit.Result.YES; + } }); EOF" unset rules_dir diff --git a/lib/rpc_backend b/lib/rpc_backend index 44c1e44817..a323d649a7 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -102,9 +102,9 @@ function install_rpc_backend() { if is_fedora; then install_package qpid-cpp-server if [[ $DISTRO =~ (rhel6) ]]; then - # RHEL6 leaves "auth=yes" in /etc/qpidd.conf, it needs to - # be no or you get GSS authentication errors as it - # attempts to default to this. + # RHEL6 leaves "auth=yes" in /etc/qpidd.conf, it needs to + # be no or you get GSS authentication errors as it + # attempts to default to this. 
sudo sed -i.bak 's/^auth=yes$/auth=no/' /etc/qpidd.conf fi elif is_ubuntu; then diff --git a/lib/swift b/lib/swift index 6ab43c420f..8726f1e7fc 100644 --- a/lib/swift +++ b/lib/swift @@ -104,17 +104,17 @@ ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012} # cleanup_swift() - Remove residual data files function cleanup_swift() { - rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} - if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 - fi - if [[ -e ${SWIFT_DISK_IMAGE} ]]; then - rm ${SWIFT_DISK_IMAGE} - fi - rm -rf ${SWIFT_DATA_DIR}/run/ - if is_apache_enabled_service swift; then - _cleanup_swift_apache_wsgi - fi + rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} + if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then + sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 + fi + if [[ -e ${SWIFT_DISK_IMAGE} ]]; then + rm ${SWIFT_DISK_IMAGE} + fi + rm -rf ${SWIFT_DATA_DIR}/run/ + if is_apache_enabled_service swift; then + _cleanup_swift_apache_wsgi + fi } # _cleanup_swift_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file @@ -192,7 +192,7 @@ function _config_swift_apache_wsgi() { sudo cp ${SWIFT_DIR}/examples/apache2/account-server.template ${apache_vhost_dir}/account-server-${node_number} sudo sed -e " - /^#/d;/^$/d; + /^#/d;/^$/d; s/%PORT%/$account_port/g; s/%SERVICENAME%/account-server-${node_number}/g; s/%APACHE_NAME%/${APACHE_NAME}/g; @@ -202,7 +202,7 @@ function _config_swift_apache_wsgi() { sudo cp ${SWIFT_DIR}/examples/wsgi/account-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi sudo sed -e " - /^#/d;/^$/d; + /^#/d;/^$/d; s/%SERVICECONF%/account-server\/${node_number}.conf/g; " -i ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi done @@ -577,26 +577,26 @@ function start_swift() { return 0 fi - # By default with only one replica we are launching the proxy, - # container, account and object 
server in screen in foreground and - # other services in background. If we have SWIFT_REPLICAS set to something - # greater than one we first spawn all the swift services then kill the proxy - # service so we can run it in foreground in screen. ``swift-init ... - # {stop|restart}`` exits with '1' if no servers are running, ignore it just - # in case - swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true - if [[ ${SWIFT_REPLICAS} == 1 ]]; then + # By default with only one replica we are launching the proxy, + # container, account and object server in screen in foreground and + # other services in background. If we have SWIFT_REPLICAS set to something + # greater than one we first spawn all the swift services then kill the proxy + # service so we can run it in foreground in screen. ``swift-init ... + # {stop|restart}`` exits with '1' if no servers are running, ignore it just + # in case + swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true + if [[ ${SWIFT_REPLICAS} == 1 ]]; then todo="object container account" - fi - for type in proxy ${todo}; do - swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true - done - screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" - if [[ ${SWIFT_REPLICAS} == 1 ]]; then - for type in object container account; do - screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" - done - fi + fi + for type in proxy ${todo}; do + swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true + done + screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" + if [[ ${SWIFT_REPLICAS} == 1 ]]; then + for type in object container account; do + screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" + done + fi } # stop_swift() - Stop running processes (non-screen) diff --git a/lib/tempest 
b/lib/tempest index 9f41608187..8e4e5210ea 100644 --- a/lib/tempest +++ b/lib/tempest @@ -193,7 +193,7 @@ function configure_tempest() { # If namespaces are disabled, devstack will create a single # public router that tempest should be configured to use. public_router_id=$(neutron router-list | awk "/ $Q_ROUTER_NAME / \ - { print \$2 }") + { print \$2 }") fi fi @@ -328,15 +328,15 @@ function init_tempest() { local disk_image="$image_dir/${base_image_name}-blank.img" # if the cirros uec downloaded and the system is uec capable if [ -f "$kernel" -a -f "$ramdisk" -a -f "$disk_image" -a "$VIRT_DRIVER" != "openvz" \ - -a \( "$LIBVIRT_TYPE" != "lxc" -o "$VIRT_DRIVER" != "libvirt" \) ]; then - echo "Prepare aki/ari/ami Images" - ( #new namespace - # tenant:demo ; user: demo - source $TOP_DIR/accrc/demo/demo - euca-bundle-image -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH" - euca-bundle-image -i "$ramdisk" --ramdisk true -d "$BOTO_MATERIALS_PATH" - euca-bundle-image -i "$disk_image" -d "$BOTO_MATERIALS_PATH" - ) 2>&1 &1 Date: Tue, 22 Oct 2013 10:06:06 -0400 Subject: [PATCH 0197/4438] clean up whitespace issues on exercises and friends Change-Id: I812a73e46ddd4d5fed4d304d9ef92c1de243f497 --- exercises/boot_from_volume.sh | 2 +- exercises/docker.sh | 3 +- exercises/euca.sh | 52 +++++++++++++++++------------------ exercises/floating_ips.sh | 4 +-- exercises/neutron-adv-test.sh | 24 ++++++++-------- exercises/volumes.sh | 2 +- files/keystone_data.sh | 24 ++++++++-------- tests/functions.sh | 8 +++--- 8 files changed, 59 insertions(+), 60 deletions(-) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index fe27bd0956..634a6d526c 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -119,7 +119,7 @@ nova flavor-list INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova 
flavor-list | head -n 4 | tail -n 1 | get_field 1) + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) fi # Clean-up from previous runs diff --git a/exercises/docker.sh b/exercises/docker.sh index 0672bc0087..10c5436c35 100755 --- a/exercises/docker.sh +++ b/exercises/docker.sh @@ -62,7 +62,7 @@ die_if_not_set $LINENO IMAGE "Failure getting image $DOCKER_IMAGE_NAME" INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) fi # Clean-up from previous runs @@ -102,4 +102,3 @@ set +o xtrace echo "*********************************************************************" echo "SUCCESS: End DevStack Exercise: $0" echo "*********************************************************************" - diff --git a/exercises/euca.sh b/exercises/euca.sh index 64c0014236..ed521e4f7f 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -87,31 +87,31 @@ fi # Volumes # ------- if is_service_enabled c-vol && ! is_service_enabled n-cell; then - VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2` - die_if_not_set $LINENO VOLUME_ZONE "Failure to find zone for volume" - - VOLUME=`euca-create-volume -s 1 -z $VOLUME_ZONE | cut -f2` - die_if_not_set $LINENO VOLUME "Failure to create volume" - - # Test that volume has been created - VOLUME=`euca-describe-volumes $VOLUME | cut -f2` - die_if_not_set $LINENO VOLUME "Failure to get volume" - - # Test volume has become available - if ! timeout $RUNNING_TIMEOUT sh -c "while ! 
euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then - die $LINENO "volume didn't become available within $RUNNING_TIMEOUT seconds" - fi - - # Attach volume to an instance - euca-attach-volume -i $INSTANCE -d $ATTACH_DEVICE $VOLUME || \ - die $LINENO "Failure attaching volume $VOLUME to $INSTANCE" - if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -A 1 in-use | grep -q attach; do sleep 1; done"; then - die $LINENO "Could not attach $VOLUME to $INSTANCE" - fi - - # Detach volume from an instance - euca-detach-volume $VOLUME || \ - die $LINENO "Failure detaching volume $VOLUME to $INSTANCE" + VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2` + die_if_not_set $LINENO VOLUME_ZONE "Failure to find zone for volume" + + VOLUME=`euca-create-volume -s 1 -z $VOLUME_ZONE | cut -f2` + die_if_not_set $LINENO VOLUME "Failure to create volume" + + # Test that volume has been created + VOLUME=`euca-describe-volumes $VOLUME | cut -f2` + die_if_not_set $LINENO VOLUME "Failure to get volume" + + # Test volume has become available + if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then + die $LINENO "volume didn't become available within $RUNNING_TIMEOUT seconds" + fi + + # Attach volume to an instance + euca-attach-volume -i $INSTANCE -d $ATTACH_DEVICE $VOLUME || \ + die $LINENO "Failure attaching volume $VOLUME to $INSTANCE" + if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -A 1 in-use | grep -q attach; do sleep 1; done"; then + die $LINENO "Could not attach $VOLUME to $INSTANCE" + fi + + # Detach volume from an instance + euca-detach-volume $VOLUME || \ + die $LINENO "Failure detaching volume $VOLUME to $INSTANCE" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then die $LINENO "Could not detach $VOLUME to $INSTANCE" fi @@ -120,7 +120,7 @@ if is_service_enabled c-vol && ! is_service_enabled n-cell; then euca-delete-volume $VOLUME || \ die $LINENO "Failure to delete volume" if ! timeout $ACTIVE_TIMEOUT sh -c "while euca-describe-volumes | grep $VOLUME; do sleep 1; done"; then - die $LINENO "Could not delete $VOLUME" + die $LINENO "Could not delete $VOLUME" fi else echo "Volume Tests Skipped" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 2833b650ba..1a1608c872 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -113,7 +113,7 @@ nova flavor-list INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) fi # Clean-up from previous runs @@ -168,7 +168,7 @@ if ! is_service_enabled neutron; then # list floating addresses if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then die $LINENO "Floating IP not allocated" - fi + fi fi # Dis-allow icmp traffic (ping) diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index e0c37ef723..7dfa5dc161 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -273,12 +273,12 @@ function create_vms { } function ping_ip { - # Test agent connection. Assumes namespaces are disabled, and - # that DHCP is in use, but not L3 - local VM_NAME=$1 - local NET_NAME=$2 - IP=$(get_instance_ip $VM_NAME $NET_NAME) - ping_check $NET_NAME $IP $BOOT_TIMEOUT + # Test agent connection. 
Assumes namespaces are disabled, and + # that DHCP is in use, but not L3 + local VM_NAME=$1 + local NET_NAME=$2 + IP=$(get_instance_ip $VM_NAME $NET_NAME) + ping_check $NET_NAME $IP $BOOT_TIMEOUT } function check_vm { @@ -330,12 +330,12 @@ function delete_network { } function delete_networks { - foreach_tenant_net 'delete_network ${%TENANT%_NAME} %NUM%' - #TODO(nati) add secuirty group check after it is implemented - # source $TOP_DIR/openrc demo1 demo1 - # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 - # source $TOP_DIR/openrc demo2 demo2 - # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 + foreach_tenant_net 'delete_network ${%TENANT%_NAME} %NUM%' + # TODO(nati) add secuirty group check after it is implemented + # source $TOP_DIR/openrc demo1 demo1 + # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 + # source $TOP_DIR/openrc demo2 demo2 + # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 } function create_all { diff --git a/exercises/volumes.sh b/exercises/volumes.sh index e536d16249..9ee9fa910a 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -117,7 +117,7 @@ nova flavor-list INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) fi # Clean-up from previous runs diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 3f3137cb14..ea2d52d114 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -66,12 +66,12 @@ fi # Heat if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then HEAT_USER=$(get_id keystone user-create --name=heat \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=heat@example.com) + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=heat@example.com) keystone user-role-add 
--tenant-id $SERVICE_TENANT \ - --user-id $HEAT_USER \ - --role-id $SERVICE_ROLE + --user-id $HEAT_USER \ + --role-id $SERVICE_ROLE # heat_stack_user role is for users created by Heat keystone role-create --name heat_stack_user if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then @@ -126,16 +126,16 @@ fi # Ceilometer if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then CEILOMETER_USER=$(get_id keystone user-create --name=ceilometer \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=ceilometer@example.com) + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=ceilometer@example.com) keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $CEILOMETER_USER \ - --role-id $ADMIN_ROLE + --user-id $CEILOMETER_USER \ + --role-id $ADMIN_ROLE # Ceilometer needs ResellerAdmin role to access swift account stats. keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $CEILOMETER_USER \ - --role-id $RESELLER_ROLE + --user-id $CEILOMETER_USER \ + --role-id $RESELLER_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then CEILOMETER_SERVICE=$(get_id keystone service-create \ --name=ceilometer \ diff --git a/tests/functions.sh b/tests/functions.sh index 7d486d4cc5..40376aa63f 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -122,16 +122,16 @@ fi # test empty option if ini_has_option test.ini ddd empty; then - echo "OK: ddd.empty present" + echo "OK: ddd.empty present" else - echo "ini_has_option failed: ddd.empty not found" + echo "ini_has_option failed: ddd.empty not found" fi # test non-empty option if ini_has_option test.ini bbb handlers; then - echo "OK: bbb.handlers present" + echo "OK: bbb.handlers present" else - echo "ini_has_option failed: bbb.handlers not found" + echo "ini_has_option failed: bbb.handlers not found" fi # test changing empty option From b83c365cf540261c9455a41f4f96aa3c0695fa9c Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 10:08:04 -0400 Subject: [PATCH 0198/4438] clean 
up whitespace on stack.sh Change-Id: If73435968cfbd0dd3cc519f0a30e02bec5fcb386 --- stack.sh | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/stack.sh b/stack.sh index f54d0f240b..5813a8ad09 100755 --- a/stack.sh +++ b/stack.sh @@ -1018,7 +1018,7 @@ if is_service_enabled nova && is_baremetal; then prepare_baremetal_toolchain configure_baremetal_nova_dirs if [[ "$BM_USE_FAKE_ENV" = "True" ]]; then - create_fake_baremetal_env + create_fake_baremetal_env fi fi @@ -1180,26 +1180,26 @@ if is_service_enabled g-reg; then die_if_not_set $LINENO TOKEN "Keystone fail to get token" if is_baremetal; then - echo_summary "Creating and uploading baremetal images" + echo_summary "Creating and uploading baremetal images" - # build and upload separate deploy kernel & ramdisk - upload_baremetal_deploy $TOKEN + # build and upload separate deploy kernel & ramdisk + upload_baremetal_deploy $TOKEN - # upload images, separating out the kernel & ramdisk for PXE boot - for image_url in ${IMAGE_URLS//,/ }; do - upload_baremetal_image $image_url $TOKEN - done + # upload images, separating out the kernel & ramdisk for PXE boot + for image_url in ${IMAGE_URLS//,/ }; do + upload_baremetal_image $image_url $TOKEN + done else - echo_summary "Uploading images" + echo_summary "Uploading images" - # Option to upload legacy ami-tty, which works with xenserver - if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then - IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz" - fi + # Option to upload legacy ami-tty, which works with xenserver + if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then + IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz" + fi - for image_url in ${IMAGE_URLS//,/ }; do - upload_image $image_url $TOKEN - done + for image_url in ${IMAGE_URLS//,/ }; do + upload_image $image_url $TOKEN + done fi fi @@ -1211,7 +1211,7 @@ fi if is_service_enabled nova && 
is_baremetal; then # create special flavor for baremetal if we know what images to associate [[ -n "$BM_DEPLOY_KERNEL_ID" ]] && [[ -n "$BM_DEPLOY_RAMDISK_ID" ]] && \ - create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID + create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID # otherwise user can manually add it later by calling nova-baremetal-manage [[ -n "$BM_FIRST_MAC" ]] && add_baremetal_node @@ -1233,7 +1233,7 @@ fi CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT") echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \ - SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP; do + SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP; do echo $i=${!i} >>$TOP_DIR/.stackenv done From 02d7fe13bb714c3c8c28fbe16ecbeac472a80094 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 11:31:21 -0400 Subject: [PATCH 0199/4438] add support for heredoc folding of lines this change in the parser allows for us to have heredocs folded into logical lines. 
Change-Id: I51ebe6cd7b89b5f7194e947896f20b6750e972e3 --- tools/bash8.py | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/tools/bash8.py b/tools/bash8.py index 82a10107e1..edf7da4645 100755 --- a/tools/bash8.py +++ b/tools/bash8.py @@ -55,10 +55,41 @@ def check_indents(line): print_error('E003: Indent not multiple of 4', line) +def starts_multiline(line): + m = re.search("[^<]<<\s*(?P\w+)", line) + if m: + return m.group('token') + else: + return False + + +def end_of_multiline(line, token): + if token: + return re.search("^%s\s*$" % token, line) is not None + return False + + def check_files(files): + in_multiline = False + logical_line = "" + token = False for line in fileinput.input(files): - check_no_trailing_whitespace(line) - check_indents(line) + # NOTE(sdague): multiline processing of heredocs is interesting + if not in_multiline: + logical_line = line + token = starts_multiline(line) + if token: + in_multiline = True + continue + else: + logical_line = logical_line + line + if not end_of_multiline(line, token): + continue + else: + in_multiline = False + + check_no_trailing_whitespace(logical_line) + check_indents(logical_line) def get_options(): From 0b865a55f2b6fa1435e8bf6df09218a9bf7a0ca0 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 11:37:35 -0400 Subject: [PATCH 0200/4438] final bash8 files for the rest of devstack With this devstack/master is bash8 clean, and ready for enforcement Change-Id: I03fc89b401e6b7a23224d71472122c1bfa3ad0bd --- tools/build_bm_multi.sh | 4 +- tools/build_uec.sh | 6 +- tools/create_userrc.sh | 8 +- tools/jenkins/jenkins_home/build_jenkins.sh | 16 +-- tools/xen/install_os_domU.sh | 10 +- tools/xen/scripts/install-os-vpx.sh | 114 ++++++++++---------- tools/xen/scripts/uninstall-os-vpx.sh | 58 +++++----- 7 files changed, 108 insertions(+), 108 deletions(-) diff --git a/tools/build_bm_multi.sh b/tools/build_bm_multi.sh index 52b9b4ea32..328d5762fc 100755 --- 
a/tools/build_bm_multi.sh +++ b/tools/build_bm_multi.sh @@ -22,8 +22,8 @@ run_bm STACKMASTER $HEAD_HOST "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vn if [ ! "$TERMINATE" = "1" ]; then echo "Waiting for head node ($HEAD_HOST) to start..." if ! timeout 60 sh -c "while ! wget -q -O- http://$HEAD_HOST | grep -q username; do sleep 1; done"; then - echo "Head node did not start" - exit 1 + echo "Head node did not start" + exit 1 fi fi diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 6c4a26c2e3..bce051a0b7 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -229,8 +229,8 @@ EOF # (re)start a metadata service ( - pid=`lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | tail -1` - [ -z "$pid" ] || kill -9 $pid + pid=`lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | tail -1` + [ -z "$pid" ] || kill -9 $pid ) cd $vm_dir/uec python meta.py 192.168.$GUEST_NETWORK.1:4567 & @@ -268,7 +268,7 @@ if [ "$WAIT_TILL_LAUNCH" = "1" ]; then sleep 2 while [ ! -e "$vm_dir/console.log" ]; do - sleep 1 + sleep 1 done tail -F $vm_dir/console.log & diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index 44b0f6bba0..8383fe7d77 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -105,15 +105,15 @@ if [ -z "$OS_PASSWORD" ]; then fi if [ -z "$OS_TENANT_NAME" -a -z "$OS_TENANT_ID" ]; then - export OS_TENANT_NAME=admin + export OS_TENANT_NAME=admin fi if [ -z "$OS_USERNAME" ]; then - export OS_USERNAME=admin + export OS_USERNAME=admin fi if [ -z "$OS_AUTH_URL" ]; then - export OS_AUTH_URL=http://localhost:5000/v2.0/ + export OS_AUTH_URL=http://localhost:5000/v2.0/ fi USER_PASS=${USER_PASS:-$OS_PASSWORD} @@ -249,7 +249,7 @@ if [ $MODE != "create" ]; then for user_id_at_name in `keystone user-list --tenant-id $tenant_id | awk 'BEGIN {IGNORECASE = 1} /true[[:space:]]*\|[^|]*\|$/ {print $2 "@" $4}'`; do read user_id user_name <<< `echo "$user_id_at_name" | sed 's/@/ /'` if [ $MODE = one -a "$user_name" != "$USER_NAME" ]; then 
- continue; + continue; fi add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS" done diff --git a/tools/jenkins/jenkins_home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh index e0e774ee9e..a556db0f1d 100755 --- a/tools/jenkins/jenkins_home/build_jenkins.sh +++ b/tools/jenkins/jenkins_home/build_jenkins.sh @@ -6,8 +6,8 @@ set -o errexit # Make sure only root can run our script if [[ $EUID -ne 0 ]]; then - echo "This script must be run as root" - exit 1 + echo "This script must be run as root" + exit 1 fi # This directory @@ -31,15 +31,15 @@ apt-get install -y --force-yes $DEPS # Install jenkins if [ ! -e /var/lib/jenkins ]; then - echo "Jenkins installation failed" - exit 1 + echo "Jenkins installation failed" + exit 1 fi # Make sure user has configured a jenkins ssh pubkey if [ ! -e /var/lib/jenkins/.ssh/id_rsa.pub ]; then - echo "Public key for jenkins is missing. This is used to ssh into your instances." - echo "Please run "su -c ssh-keygen jenkins" before proceeding" - exit 1 + echo "Public key for jenkins is missing. This is used to ssh into your instances." + echo "Please run "su -c ssh-keygen jenkins" before proceeding" + exit 1 fi # Setup sudo @@ -96,7 +96,7 @@ PLUGINS=http://hudson-ci.org/downloads/plugins/build-timeout/1.6/build-timeout.h # Configure plugins for plugin in ${PLUGINS//,/ }; do - name=`basename $plugin` + name=`basename $plugin` dest=/var/lib/jenkins/plugins/$name if [ ! -e $dest ]; then curl -L $plugin -o $dest diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 0f314bfa9a..9a2f5a8c03 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -44,9 +44,9 @@ source $THIS_DIR/xenrc xe_min() { - local cmd="$1" - shift - xe "$cmd" --minimal "$@" + local cmd="$1" + shift + xe "$cmd" --minimal "$@" } # @@ -132,8 +132,8 @@ HOST_IP=$(xenapi_ip_on "$MGT_BRIDGE_OR_NET_NAME") # Set up ip forwarding, but skip on xcp-xapi if [ -a /etc/sysconfig/network ]; then if ! 
grep -q "FORWARD_IPV4=YES" /etc/sysconfig/network; then - # FIXME: This doesn't work on reboot! - echo "FORWARD_IPV4=YES" >> /etc/sysconfig/network + # FIXME: This doesn't work on reboot! + echo "FORWARD_IPV4=YES" >> /etc/sysconfig/network fi fi # Also, enable ip forwarding in rc.local, since the above trick isn't working diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index 7469e0c10b..7b0d891493 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -42,69 +42,69 @@ EOF get_params() { - while getopts "hbn:r:l:t:" OPTION; - do - case $OPTION in - h) usage - exit 1 - ;; - n) - BRIDGE=$OPTARG - ;; - l) - NAME_LABEL=$OPTARG - ;; - t) - TEMPLATE_NAME=$OPTARG - ;; - ?) - usage - exit - ;; - esac - done - if [[ -z $BRIDGE ]] - then - BRIDGE=xenbr0 - fi - - if [[ -z $TEMPLATE_NAME ]]; then - echo "Please specify a template name" >&2 - exit 1 - fi - - if [[ -z $NAME_LABEL ]]; then - echo "Please specify a name-label for the new VM" >&2 - exit 1 - fi + while getopts "hbn:r:l:t:" OPTION; + do + case $OPTION in + h) usage + exit 1 + ;; + n) + BRIDGE=$OPTARG + ;; + l) + NAME_LABEL=$OPTARG + ;; + t) + TEMPLATE_NAME=$OPTARG + ;; + ?) 
+ usage + exit + ;; + esac + done + if [[ -z $BRIDGE ]] + then + BRIDGE=xenbr0 + fi + + if [[ -z $TEMPLATE_NAME ]]; then + echo "Please specify a template name" >&2 + exit 1 + fi + + if [[ -z $NAME_LABEL ]]; then + echo "Please specify a name-label for the new VM" >&2 + exit 1 + fi } xe_min() { - local cmd="$1" - shift - xe "$cmd" --minimal "$@" + local cmd="$1" + shift + xe "$cmd" --minimal "$@" } find_network() { - result=$(xe_min network-list bridge="$1") - if [ "$result" = "" ] - then - result=$(xe_min network-list name-label="$1") - fi - echo "$result" + result=$(xe_min network-list bridge="$1") + if [ "$result" = "" ] + then + result=$(xe_min network-list name-label="$1") + fi + echo "$result" } create_vif() { - local v="$1" - echo "Installing VM interface on [$BRIDGE]" - local out_network_uuid=$(find_network "$BRIDGE") - xe vif-create vm-uuid="$v" network-uuid="$out_network_uuid" device="0" + local v="$1" + echo "Installing VM interface on [$BRIDGE]" + local out_network_uuid=$(find_network "$BRIDGE") + xe vif-create vm-uuid="$v" network-uuid="$out_network_uuid" device="0" } @@ -112,20 +112,20 @@ create_vif() # Make the VM auto-start on server boot. 
set_auto_start() { - local v="$1" - xe vm-param-set uuid="$v" other-config:auto_poweron=true + local v="$1" + xe vm-param-set uuid="$v" other-config:auto_poweron=true } destroy_vifs() { - local v="$1" - IFS=, - for vif in $(xe_min vif-list vm-uuid="$v") - do - xe vif-destroy uuid="$vif" - done - unset IFS + local v="$1" + IFS=, + for vif in $(xe_min vif-list vm-uuid="$v") + do + xe vif-destroy uuid="$vif" + done + unset IFS } diff --git a/tools/xen/scripts/uninstall-os-vpx.sh b/tools/xen/scripts/uninstall-os-vpx.sh index ac260949c4..1ed249433a 100755 --- a/tools/xen/scripts/uninstall-os-vpx.sh +++ b/tools/xen/scripts/uninstall-os-vpx.sh @@ -22,63 +22,63 @@ set -ex # By default, don't remove the templates REMOVE_TEMPLATES=${REMOVE_TEMPLATES:-"false"} if [ "$1" = "--remove-templates" ]; then - REMOVE_TEMPLATES=true + REMOVE_TEMPLATES=true fi xe_min() { - local cmd="$1" - shift - xe "$cmd" --minimal "$@" + local cmd="$1" + shift + xe "$cmd" --minimal "$@" } destroy_vdi() { - local vbd_uuid="$1" - local type=$(xe_min vbd-list uuid=$vbd_uuid params=type) - local dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice) - local vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid) + local vbd_uuid="$1" + local type=$(xe_min vbd-list uuid=$vbd_uuid params=type) + local dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice) + local vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid) - if [ "$type" == 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]; then - xe vdi-destroy uuid=$vdi_uuid - fi + if [ "$type" == 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]; then + xe vdi-destroy uuid=$vdi_uuid + fi } uninstall() { - local vm_uuid="$1" - local power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state) + local vm_uuid="$1" + local power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state) - if [ "$power_state" != "halted" ]; then - xe vm-shutdown vm=$vm_uuid force=true - fi + if [ "$power_state" != "halted" ]; then + xe vm-shutdown vm=$vm_uuid 
force=true + fi - for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do - destroy_vdi "$v" - done + for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do + destroy_vdi "$v" + done - xe vm-uninstall vm=$vm_uuid force=true >/dev/null + xe vm-uninstall vm=$vm_uuid force=true >/dev/null } uninstall_template() { - local vm_uuid="$1" + local vm_uuid="$1" - for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do - destroy_vdi "$v" - done + for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do + destroy_vdi "$v" + done - xe template-uninstall template-uuid=$vm_uuid force=true >/dev/null + xe template-uninstall template-uuid=$vm_uuid force=true >/dev/null } # remove the VMs and their disks for u in $(xe_min vm-list other-config:os-vpx=true | sed -e 's/,/ /g'); do - uninstall "$u" + uninstall "$u" done # remove the templates if [ "$REMOVE_TEMPLATES" == "true" ]; then - for u in $(xe_min template-list other-config:os-vpx=true | sed -e 's/,/ /g'); do - uninstall_template "$u" - done + for u in $(xe_min template-list other-config:os-vpx=true | sed -e 's/,/ /g'); do + uninstall_template "$u" + done fi From 9b973670a6c200e5f6251bb21eb443be619694c6 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 16 Oct 2013 15:13:56 -0500 Subject: [PATCH 0201/4438] Add the doc build tools tools/build_docs.sh generates the devstack.org website from the static pages and generated pages created by running shocco against a DevStack checkout. Note that while this is the complete auto page generation of the devstack.org site, pushing the content back to GitHub is limited to those with push access to the current repo. 
Partial-bug 1235626 Change-Id: I61dc3d56e4a4832a9ddd1904dd8af65c15a17e50 --- tools/build_docs.sh | 135 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 135 insertions(+) create mode 100755 tools/build_docs.sh diff --git a/tools/build_docs.sh b/tools/build_docs.sh new file mode 100755 index 0000000000..216e557025 --- /dev/null +++ b/tools/build_docs.sh @@ -0,0 +1,135 @@ +#!/usr/bin/env bash + +# **build_docs.sh** - Build the gh-pages docs for DevStack +# +# - Install shocco if not found on PATH +# - Clone MASTER_REPO branch MASTER_BRANCH +# - Re-creates ``docs`` directory from existing repo + new generated script docs + +# Usage: +## build_docs.sh [[-b branch] [-p] repo] | . +## -b branch The DevStack branch to check out (default is master; ignored if +## repo is not specified) +## -p Push the resulting docs tree to the source repo; fatal error if +## repo is not specified +## repo The DevStack repository to clone (default is DevStack github repo) +## If a repo is not supplied use the current directory +## (assumed to be a DevStack checkout) as the source. +## . Use the current repo and branch (do not use with -p to +## prevent stray files in the workspace being added tot he docs) + +# Defaults +# -------- + +# Source repo/branch for DevStack +MASTER_REPO=${MASTER_REPO:-https://github.com/openstack-dev/devstack.git} +MASTER_BRANCH=${MASTER_BRANCH:-master} + +# http://devstack.org is a GitHub gh-pages site in the https://github.com/cloudbuilders/devtack.git repo +GH_PAGES_REPO=git@github.com:cloudbuilders/devstack.git + +# Uses this shocco branch: https://github.com/dtroyer/shocco/tree/rst_support +SHOCCO=${SHOCCO:-shocco} +if ! which shocco; then + if [[ ! -x shocco/shocco ]]; then + if [[ -z "$INSTALL_SHOCCO" ]]; then + echo "shocco not found in \$PATH, please set environment variable SHOCCO" + exit 1 + fi + echo "Installing local copy of shocco" + git clone -b rst_support https://github.com/dtroyer/shocco shocco + cd shocco + ./configure + make + cd .. 
+ fi + SHOCCO=shocco/shocco +fi + +# Process command-line args +while getopts b:p c; do + case $c in + b) MASTER_BRANCH=$OPTARG + ;; + p) PUSH_REPO=1 + ;; + esac +done +shift `expr $OPTIND - 1` + +# Sanity check the args +if [[ "$1" == "." ]]; then + REPO="" + if [[ -n $PUSH_REPO ]]; then + echo "Push not allowed from an active workspace" + unset PUSH_REPO + fi +else + if [[ -z "$1" ]]; then + REPO=$MASTER_REPO + else + REPO=$1 + fi +fi + +# Check out a specific DevStack branch +if [[ -n $REPO ]]; then + # Make a workspace + TMP_ROOT=$(mktemp -d devstack-docs-XXXX) + echo "Building docs in $TMP_ROOT" + cd $TMP_ROOT + + # Get the master branch + git clone $REPO devstack + cd devstack + git checkout $MASTER_BRANCH +fi + +# Processing +# ---------- + +# Assumption is we are now in the DevStack repo workspace to be processed + +# Pull the latest docs branch from devstack.org repo +rm -rf docs || true +git clone -b gh-pages $GH_PAGES_REPO docs + +# Build list of scripts to process +FILES="" +for f in $(find . -name .git -prune -o \( -type f -name \*.sh -not -path \*shocco/\* -print \)); do + echo $f + FILES+="$f " + mkdir -p docs/`dirname $f`; + $SHOCCO $f > docs/$f.html +done +for f in $(find functions lib samples -type f -name \*); do + echo $f + FILES+="$f " + mkdir -p docs/`dirname $f`; + $SHOCCO $f > docs/$f.html +done +echo "$FILES" >docs-files + +# Switch to the gh_pages repo +cd docs + +# Collect the new generated pages +find . -name \*.html -print0 | xargs -0 git add + +# Push our changes back up to the docs branch +if ! 
git diff-index HEAD --quiet; then + git commit -a -m "Update script docs" + if [[ -n $PUSH ]]; then + git push + fi +fi + +# Clean up or report the temp workspace +if [[ -n REPO && -n $PUSH_REPO ]]; then + rm -rf $TMP_ROOT +else + if [[ -z "$TMP_ROOT" ]]; then + TMP_ROOT="$(pwd)" + fi + echo "Built docs in $TMP_ROOT" +fi From 23178a997a3b0abd1922f356e572e2933f454dc1 Mon Sep 17 00:00:00 2001 From: DennyZhang Date: Tue, 22 Oct 2013 17:07:32 -0500 Subject: [PATCH 0202/4438] add assertions for blind grep blind grep is error-prone. Add assertions for errors we can not handle Change-Id: Ibe19085545ecc848498506e8b8ee14e71825b273 --- exercises/aggregates.sh | 3 +++ exercises/floating_ips.sh | 1 + exercises/neutron-adv-test.sh | 6 ++++++ exercises/sec_groups.sh | 1 + exercises/volumes.sh | 1 + lib/neutron | 10 ++++++++++ lib/swift | 5 +++++ stack.sh | 2 ++ tools/jenkins/adapters/euca.sh | 1 + tools/jenkins/adapters/floating_ips.sh | 1 + tools/jenkins/adapters/volumes.sh | 1 + 11 files changed, 32 insertions(+) diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index e5fc7dec84..6cc81ae11a 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -66,7 +66,10 @@ exit_if_aggregate_present() { exit_if_aggregate_present $AGGREGATE_NAME AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1) +die_if_not_set $LINENO AGGREGATE_ID "Failure creating AGGREGATE_ID for $AGGREGATE_NAME $AGGREGATE_A_ZONE" + AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1) +die_if_not_set $LINENO AGGREGATE2_ID "Fail creating AGGREGATE2_ID for $AGGREGATE2_NAME $AGGREGATE_A_ZONE" # check aggregate created nova aggregate-list | grep -q " $AGGREGATE_NAME " || die $LINENO "Aggregate $AGGREGATE_NAME not created" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 1a1608c872..4d71d49163 100755 --- a/exercises/floating_ips.sh +++ 
b/exercises/floating_ips.sh @@ -114,6 +114,7 @@ INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) + die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE" fi # Clean-up from previous runs diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index 7dfa5dc161..28e0a3d441 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -139,24 +139,28 @@ function foreach_tenant_net { function get_image_id { local IMAGE_ID=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) + die_if_not_set $LINENO IMAGE_ID "Failure retrieving IMAGE_ID" echo "$IMAGE_ID" } function get_tenant_id { local TENANT_NAME=$1 local TENANT_ID=`keystone tenant-list | grep " $TENANT_NAME " | head -n 1 | get_field 1` + die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for $TENANT_NAME" echo "$TENANT_ID" } function get_user_id { local USER_NAME=$1 local USER_ID=`keystone user-list | grep $USER_NAME | awk '{print $2}'` + die_if_not_set $LINENO USER_ID "Failure retrieving USER_ID for $USER_NAME" echo "$USER_ID" } function get_role_id { local ROLE_NAME=$1 local ROLE_ID=`keystone role-list | grep $ROLE_NAME | awk '{print $2}'` + die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME" echo "$ROLE_ID" } @@ -169,6 +173,7 @@ function get_network_id { function get_flavor_id { local INSTANCE_TYPE=$1 local FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'` + die_if_not_set $LINENO FLAVOR_ID "Failure retrieving FLAVOR_ID for $INSTANCE_TYPE" echo "$FLAVOR_ID" } @@ -234,6 +239,7 @@ function create_network { local TENANT_ID=$(get_tenant_id $TENANT) source $TOP_DIR/openrc $TENANT $TENANT local NET_ID=$(neutron net-create --tenant_id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) 
+ die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA" neutron subnet-create --ip_version 4 --tenant_id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR neutron-debug probe-create --device-owner compute $NET_ID source $TOP_DIR/openrc demo demo diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh index 7d80570326..eb32cc7aa7 100755 --- a/exercises/sec_groups.sh +++ b/exercises/sec_groups.sh @@ -56,6 +56,7 @@ done # Check to make sure rules were added SEC_GROUP_RULES=( $(nova secgroup-list-rules $SEC_GROUP_NAME | grep -v \- | grep -v 'Source Group' | cut -d '|' -f3 | tr -d ' ') ) +die_if_not_set $LINENO SEC_GROUP_RULES "Failure retrieving SEC_GROUP_RULES for $SEC_GROUP_NAME" for i in "${RULES_TO_ADD[@]}"; do skip= for j in "${SEC_GROUP_RULES[@]}"; do diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 9ee9fa910a..77fa4ebc25 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -118,6 +118,7 @@ INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) + die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE" fi # Clean-up from previous runs diff --git a/lib/neutron b/lib/neutron index 44fb9e1005..9227f19b35 100644 --- a/lib/neutron +++ b/lib/neutron @@ -322,6 +322,7 @@ function create_neutron_accounts() { function create_neutron_initial_network() { TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1) + die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for demo" # Create a small network # Since neutron command is executed in admin context at this point, @@ -336,12 +337,16 @@ function create_neutron_initial_network() { sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE done NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat 
--provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2) + die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID" SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $TENANT_ID" sudo ifconfig $OVS_PHYSICAL_BRIDGE up sudo route add default gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE else NET_ID=$(neutron net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID" SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $TENANT_ID" fi if [[ "$Q_L3_ENABLED" == "True" ]]; then @@ -349,14 +354,18 @@ function create_neutron_initial_network() { if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then # create a tenant-owned router. ROUTER_ID=$(neutron router-create --tenant_id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2) + die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $TENANT_ID $Q_ROUTER_NAME" else # Plugin only supports creating a single router, which should be admin owned. ROUTER_ID=$(neutron router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2) + die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME" fi neutron router-interface-add $ROUTER_ID $SUBNET_ID # Create an external network, and a subnet. 
Configure the external network as router gw EXT_NET_ID=$(neutron net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) + die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME" EXT_GW_IP=$(neutron subnet-create --ip_version 4 ${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} --gateway $PUBLIC_NETWORK_GATEWAY --name $PUBLIC_SUBNET_NAME $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) + die_if_not_set $LINENO EXT_GW_IP "Failure creating EXT_GW_IP" neutron router-gateway-set $ROUTER_ID $EXT_NET_ID if is_service_enabled q-l3; then @@ -366,6 +375,7 @@ function create_neutron_initial_network() { sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE sudo ip link set $PUBLIC_BRIDGE up ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'` + die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP fi if [[ "$Q_USE_NAMESPACE" == "False" ]]; then diff --git a/lib/swift b/lib/swift index 8726f1e7fc..c338375f60 100644 --- a/lib/swift +++ b/lib/swift @@ -492,14 +492,19 @@ function create_swift_accounts() { fi SWIFT_TENANT_TEST1=$(keystone tenant-create --name=swifttenanttest1 | grep " id " | get_field 2) + die_if_not_set $LINENO SWIFT_TENANT_TEST1 "Failure creating SWIFT_TENANT_TEST1" SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=testing --email=test@example.com | grep " id " | get_field 2) + die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1" keystone user-role-add --user-id $SWIFT_USER_TEST1 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST1 SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 --pass=testing3 --email=test3@example.com | grep " id " | get_field 2) + die_if_not_set $LINENO SWIFT_USER_TEST3 "Failure creating SWIFT_USER_TEST3" keystone user-role-add 
--user-id $SWIFT_USER_TEST3 --role-id $ANOTHER_ROLE --tenant-id $SWIFT_TENANT_TEST1 SWIFT_TENANT_TEST2=$(keystone tenant-create --name=swifttenanttest2 | grep " id " | get_field 2) + die_if_not_set $LINENO SWIFT_TENANT_TEST2 "Failure creating SWIFT_TENANT_TEST2" SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=testing2 --email=test2@example.com | grep " id " | get_field 2) + die_if_not_set $LINENO SWIFT_USER_TEST2 "Failure creating SWIFT_USER_TEST2" keystone user-role-add --user-id $SWIFT_USER_TEST2 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST2 } diff --git a/stack.sh b/stack.sh index 5813a8ad09..3c4afd9fe7 100755 --- a/stack.sh +++ b/stack.sh @@ -1068,7 +1068,9 @@ fi # Create an access key and secret key for nova ec2 register image if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1) + die_if_not_set $LINENO NOVA_USER_ID "Failure retrieving NOVA_USER_ID for nova" NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1) + die_if_not_set $LINENO NOVA_TENANT_ID "Failure retrieving NOVA_TENANT_ID for $SERVICE_TENANT_NAME" CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID) ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') diff --git a/tools/jenkins/adapters/euca.sh b/tools/jenkins/adapters/euca.sh index b49ce9f21f..a7e635c694 100755 --- a/tools/jenkins/adapters/euca.sh +++ b/tools/jenkins/adapters/euca.sh @@ -5,4 +5,5 @@ set -o errexit TOP_DIR=$(cd ../../.. 
&& pwd) HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2` +die_if_not_set $LINENO HEAD_IP "Failure retrieving HEAD_IP" ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./euca.sh' diff --git a/tools/jenkins/adapters/floating_ips.sh b/tools/jenkins/adapters/floating_ips.sh index a97f93578a..8da1eeb97a 100755 --- a/tools/jenkins/adapters/floating_ips.sh +++ b/tools/jenkins/adapters/floating_ips.sh @@ -5,4 +5,5 @@ set -o errexit TOP_DIR=$(cd ../../.. && pwd) HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2` +die_if_not_set $LINENO HEAD_IP "Failure retrieving HEAD_IP" ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./floating_ips.sh' diff --git a/tools/jenkins/adapters/volumes.sh b/tools/jenkins/adapters/volumes.sh index ec292097fa..0a0b6c0548 100755 --- a/tools/jenkins/adapters/volumes.sh +++ b/tools/jenkins/adapters/volumes.sh @@ -5,4 +5,5 @@ set -o errexit TOP_DIR=$(cd ../../.. && pwd) HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2` +die_if_not_set $LINENO HEAD_IP "Failure retrieving HEAD_IP" ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./volumes.sh' From 7a4ae3d24260cc2cd8eaed495829ec44ff121458 Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Thu, 10 Oct 2013 00:40:38 +0900 Subject: [PATCH 0203/4438] Modification for Ubuntu 13.10 and minor fixes in Neutron NEC plugin Modifications for Ubuntu 13.10: * Add .conf suffix to apache2 config files. In Ubuntu 13.10, files in sites-available should have ".conf" suffix. Otherwise it is not recognized by a2ensite. * libglib2.0-dev is added to lib/files/apt/trema. Trema is an OpenFlow controler framework used by Neutron NEC plugin Ubuntu package dependency seems to be changed. Minor cleanups are also done in OVS configuration: * Set datapath_id before connecting to the OpenFlow controller to ensure datapath_id changes after connected. Previously datapath_id is changed after connecting to the controller. 
* Drop "0x" prefix from datapath_id passed to OVS. OVS ignores datapath_id with 0x prefix. * Fix a bug that SKIP_OVS_BRIDGE_SETUP skips all confiugration of the plugin agent. It should skip only OVS setup. Change-Id: Ifac3def8decda577b5740c82fe8d24e8520c7777 --- files/apts/trema | 1 + lib/neutron_plugins/nec | 11 ++++++++--- lib/neutron_thirdparty/trema | 2 +- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/files/apts/trema b/files/apts/trema index e33ccd3004..09cb7c6794 100644 --- a/files/apts/trema +++ b/files/apts/trema @@ -6,6 +6,7 @@ rubygems1.8 ruby1.8-dev libpcap-dev libsqlite3-dev +libglib2.0-dev # Sliceable Switch sqlite3 diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec index 3806c32c75..d8d8b7ce7e 100644 --- a/lib/neutron_plugins/nec +++ b/lib/neutron_plugins/nec @@ -55,21 +55,26 @@ function neutron_plugin_configure_l3_agent() { _neutron_ovs_base_configure_l3_agent } -function neutron_plugin_configure_plugin_agent() { +function _quantum_plugin_setup_bridge() { if [[ "$SKIP_OVS_BRIDGE_SETUP" = "True" ]]; then return fi # Set up integration bridge _neutron_ovs_base_setup_bridge $OVS_BRIDGE - sudo ovs-vsctl --no-wait set-controller $OVS_BRIDGE tcp:$OFC_OFP_HOST:$OFC_OFP_PORT # Generate datapath ID from HOST_IP - local dpid=$(printf "0x%07d%03d%03d%03d\n" ${HOST_IP//./ }) + local dpid=$(printf "%07d%03d%03d%03d\n" ${HOST_IP//./ }) sudo ovs-vsctl --no-wait set Bridge $OVS_BRIDGE other-config:datapath-id=$dpid sudo ovs-vsctl --no-wait set-fail-mode $OVS_BRIDGE secure + sudo ovs-vsctl --no-wait set-controller $OVS_BRIDGE tcp:$OFC_OFP_HOST:$OFC_OFP_PORT if [ -n "$OVS_INTERFACE" ]; then sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $OVS_INTERFACE fi _neutron_setup_ovs_tunnels $OVS_BRIDGE +} + +function neutron_plugin_configure_plugin_agent() { + _quantum_plugin_setup_bridge + AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nec-agent" _neutron_ovs_base_configure_firewall_driver diff --git a/lib/neutron_thirdparty/trema 
b/lib/neutron_thirdparty/trema index 5b5c4590c3..9efd3f6c39 100644 --- a/lib/neutron_thirdparty/trema +++ b/lib/neutron_thirdparty/trema @@ -28,7 +28,7 @@ TREMA_TMP_DIR=$TREMA_DATA_DIR/trema TREMA_LOG_LEVEL=${TREMA_LOG_LEVEL:-info} TREMA_SS_CONFIG=$TREMA_SS_ETC_DIR/sliceable.conf -TREMA_SS_APACHE_CONFIG=/etc/apache2/sites-available/sliceable_switch +TREMA_SS_APACHE_CONFIG=/etc/apache2/sites-available/sliceable_switch.conf # configure_trema - Set config files, create data dirs, etc function configure_trema() { From ae9c41727abcab19bed8aa5f72c052ccd442f4ea Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 7 Mar 2013 15:23:46 +0000 Subject: [PATCH 0204/4438] Allow configure cinder with the nfs driver This patch allow to use nfs as cinder driver. To use it, in the localrc we can now set CINDER_DRIVER=nfs CINDER_NFS_SERVERPATH=172.16.0.50:/export_cinder The nfs-server is not setup by devstack. Change-Id: I8e240d00b58f272d04ab2c0922c551b1f7266260 --- lib/cinder | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/cinder b/lib/cinder index f6f137cabd..cab8b6e4fe 100644 --- a/lib/cinder +++ b/lib/cinder @@ -281,6 +281,11 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" ) + elif [ "$CINDER_DRIVER" == "nfs" ]; then + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.nfs.NfsDriver" + iniset $CINDER_CONF DEFAULT nfs_shares_config "$CINDER_CONF_DIR/nfs_shares.conf" + echo "$CINDER_NFS_SERVERPATH" | sudo tee "$CINDER_CONF_DIR/nfs_shares.conf" + sudo chmod 666 $CINDER_CONF_DIR/nfs_shares.conf elif [ "$CINDER_DRIVER" == "sheepdog" ]; then iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" elif [ "$CINDER_DRIVER" == "glusterfs" ]; then From 6db29904df63ae26a0f4a4b2e4e0c2e6f2cef669 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 22 Oct 2013 09:22:36 -0700 Subject: [PATCH 
0205/4438] Start nova-compute with child cell conf A recent commit broke the cells support and switched nova-compute to always start with the API cell .conf. This corrects the regression. Change-Id: I633344c8784c154f61e751cd0a408196e61525b3 Closes-bug: 1243961 --- lib/nova | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/lib/nova b/lib/nova index 809f8e5896..31f286d943 100644 --- a/lib/nova +++ b/lib/nova @@ -613,21 +613,25 @@ function start_nova_api() { # start_nova_compute() - Start the compute process function start_nova_compute() { - NOVA_CONF_BOTTOM=$NOVA_CONF + if is_service_enabled n-cell; then + local compute_cell_conf=$NOVA_CELLS_CONF + else + local compute_cell_conf=$NOVA_CONF + fi if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then # The group **$LIBVIRT_GROUP** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. - screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM'" + screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'" elif [[ "$VIRT_DRIVER" = 'fake' ]]; then for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do - screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM" + screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" done else if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then start_nova_hypervisor fi - screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM" + screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" fi } From 386ae8c17162d8cc950c0f6c71fa364b9cbea9d4 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 21 Oct 2013 09:27:18 +0200 Subject: [PATCH 0206/4438] Additional ssh tests with tempest The run_ssh option is used on the default 'false', so 
several test case and validation step was disabled. It was disabled because: * Admin password injection with the cirros image is not supported. (We 'cannot' inject password/shadow to a ram disk.) * In the current system setup floating IP is required for connecting with neutron The run_ssh boolean option will be removed from tempest, it will be replaced with ssh_connect_method and ssh_auth_method. Since using a floating ip with nova flat network is not required in these case, the 'fixed'/private IPs (ssh_connect_method) will be used with nova network , and we will use the 'floating' IPs with neutron when the NAMESPACES are enabled(default). The default value of ssh_auth_method is keypair, it works in both cases. Change-Id: I3d47811d801985687526749a430ed6db64224f99 --- lib/tempest | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/tempest b/lib/tempest index 8e4e5210ea..677d66b50d 100644 --- a/lib/tempest +++ b/lib/tempest @@ -75,6 +75,7 @@ function configure_tempest() { local public_router_id local tenant_networks_reachable local boto_instance_type="m1.tiny" + local ssh_connect_method="fixed" # TODO(afazekas): # sudo python setup.py deploy @@ -182,10 +183,13 @@ function configure_tempest() { if [ "$Q_USE_NAMESPACE" != "False" ]; then tenant_networks_reachable=false + ssh_connect_method="floating" else tenant_networks_reachable=true fi + ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method} + if is_service_enabled q-l3; then public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \ awk '{print $2}') @@ -248,6 +252,7 @@ function configure_tempest() { iniset $TEMPEST_CONF compute flavor_ref_alt $flavor_ref_alt iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} + iniset $TEMPEST_CONF compute ssh_connect_method $ssh_connect_method # Compute admin iniset $TEMPEST_CONF "compute-admin" 
password "$password" # DEPRECATED From 33eaa5698ca3ced12d7ab5a181cc381bdb19ce76 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 24 Oct 2013 14:12:44 +0100 Subject: [PATCH 0207/4438] Use heat's default value for max_template_size Instead of aligning value with tempest, make tempest use heat's default value as that is what most people will be deploying with. Change-Id: I77549f2b5e953ff712c50a2b372f6b04725d5eb0 --- lib/heat | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/heat b/lib/heat index 8acadb4ad1..da4505e2f9 100644 --- a/lib/heat +++ b/lib/heat @@ -118,9 +118,6 @@ function configure_heat() { iniset $HEAT_CONF heat_api_cloudwatch bind_host $HEAT_API_CW_HOST iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT - # Set limits to match tempest defaults - iniset $HEAT_CONF DEFAULT max_template_size 10240 - # heat environment sudo mkdir -p $HEAT_ENV_DIR sudo chown $STACK_USER $HEAT_ENV_DIR From cb961597cc30f9d8ece17529f09a8291454827e3 Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Sat, 5 Oct 2013 12:11:07 +0100 Subject: [PATCH 0208/4438] Fix shocco errors and warnings A few Markdown-oriented issues were causing Docutils errors to leak into the end-user docs on http://devstack.org Change-Id: I51fa9698afb1bfb48596478d83bd1fdcd84ac52e --- exercises/swift.sh | 2 +- functions | 27 +++++++++++++++------------ lib/baremetal | 6 ++++-- lib/ceilometer | 2 ++ lib/database | 13 ++++++++----- lib/neutron | 3 ++- lib/swift | 4 ++-- tools/build_ramdisk.sh | 9 ++++----- tools/create-stack-user.sh | 2 ++ tools/fixup_stuff.sh | 6 +++++- 10 files changed, 45 insertions(+), 29 deletions(-) diff --git a/exercises/swift.sh b/exercises/swift.sh index b9f1b566bb..25ea6719c1 100755 --- a/exercises/swift.sh +++ b/exercises/swift.sh @@ -2,7 +2,7 @@ # **swift.sh** -# Test swift via the ``swift`` command line from ``python-swiftclient` +# Test swift via the ``swift`` command line from ``python-swiftclient`` echo 
"*********************************************************************" echo "Begin DevStack Exercise: $0" diff --git a/functions b/functions index af5a37da17..8d076b72e5 100644 --- a/functions +++ b/functions @@ -54,7 +54,7 @@ function address_in_net() { # Wrapper for ``apt-get`` to set cache and proxy environment variables -# Uses globals ``OFFLINE``, ``*_proxy` +# Uses globals ``OFFLINE``, ``*_proxy`` # apt_get operation package [package ...] function apt_get() { [[ "$OFFLINE" = "True" || -z "$@" ]] && return @@ -260,7 +260,8 @@ function _get_package_dir() { # # Only packages required for the services in 1st argument will be # included. Two bits of metadata are recognized in the prerequisite files: -# - ``# NOPRIME`` defers installation to be performed later in stack.sh +# +# - ``# NOPRIME`` defers installation to be performed later in `stack.sh` # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection # of the package to the distros listed. The distro names are case insensitive. function get_packages() { @@ -982,7 +983,7 @@ function is_set() { # Wrapper for ``pip install`` to set cache and proxy environment variables # Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``PIP_USE_MIRRORS``, -# ``TRACK_DEPENDS``, ``*_proxy` +# ``TRACK_DEPENDS``, ``*_proxy`` # pip_install package [package ...] function pip_install { [[ "$OFFLINE" = "True" || -z "$@" ]] && return @@ -1011,8 +1012,7 @@ function pip_install { # /tmp/$USER-pip-build. Even if a later component specifies foo < # 1.1, the existing extracted build will be used and cause # confusing errors. By creating unique build directories we avoid - # this problem. See - # https://github.com/pypa/pip/issues/709 + # this problem. 
See https://github.com/pypa/pip/issues/709 local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX) $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \ @@ -1146,8 +1146,8 @@ function screen_rc { } -# Helper to remove the *.failure files under $SERVICE_DIR/$SCREEN_NAME -# This is used for service_check when all the screen_it are called finished +# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``. +# This is used for ``service_check`` when all the ``screen_it`` are called finished # init_service_check function init_service_check() { SCREEN_NAME=${SCREEN_NAME:-stack} @@ -1301,10 +1301,12 @@ function trueorfalse() { } -# Retrieve an image from a URL and upload into Glance +# Retrieve an image from a URL and upload into Glance. # Uses the following variables: -# ``FILES`` must be set to the cache dir -# ``GLANCE_HOSTPORT`` +# +# - ``FILES`` must be set to the cache dir +# - ``GLANCE_HOSTPORT`` +# # upload_image image-url glance-token function upload_image() { local image_url=$1 @@ -1466,7 +1468,8 @@ function upload_image() { # When called from stackrc/localrc DATABASE_BACKENDS has not been # initialized yet, just save the configuration selection and call back later # to validate it. -# $1 The name of the database backend to use (mysql, postgresql, ...) +# +# ``$1`` - the name of the database backend to use (mysql, postgresql, ...) function use_database { if [[ -z "$DATABASE_BACKENDS" ]]; then # No backends registered means this is likely called from ``localrc`` @@ -1507,7 +1510,7 @@ function wait_for_service() { # Wrapper for ``yum`` to set proxy environment variables -# Uses globals ``OFFLINE``, ``*_proxy` +# Uses globals ``OFFLINE``, ``*_proxy`` # yum_install package [package ...] 
function yum_install() { [[ "$OFFLINE" = "True" ]] && return diff --git a/lib/baremetal b/lib/baremetal index 141c28d15f..5606230eac 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -83,8 +83,10 @@ BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-} # To provide PXE, configure nova-network's dnsmasq rather than run the one # dedicated to baremetal. When enable this, make sure these conditions are # fulfilled: +# # 1) nova-compute and nova-network runs on the same host # 2) nova-network uses FlatDHCPManager +# # NOTE: the other BM_DNSMASQ_* have no effect on the behavior if this option # is enabled. BM_DNSMASQ_FROM_NOVA_NETWORK=`trueorfalse False $BM_DNSMASQ_FROM_NOVA_NETWORK` @@ -198,8 +200,8 @@ function create_fake_baremetal_env() { BM_FIRST_MAC=$(sudo $bm_poseur get-macs) # NOTE: there is currently a limitation in baremetal driver - # that requires second MAC even if it is not used. - # Passing a fake value allows this to work. + # that requires second MAC even if it is not used. + # Passing a fake value allows this to work. # TODO(deva): remove this after driver issue is fixed. BM_SECOND_MAC='12:34:56:78:90:12' } diff --git a/lib/ceilometer b/lib/ceilometer index cd4c4d8656..a471d9c7e6 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -2,9 +2,11 @@ # Install and start **Ceilometer** service # To enable a minimal set of Ceilometer services, add the following to localrc: +# # enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api # # To ensure Ceilometer alarming services are enabled also, further add to the localrc: +# # enable_service ceilometer-alarm-notifier ceilometer-alarm-evaluator # Dependencies: diff --git a/lib/database b/lib/database index 3c1560964c..c3fd435eb2 100644 --- a/lib/database +++ b/lib/database @@ -9,10 +9,11 @@ # This is a wrapper for the specific database backends available. 
# Each database must implement four functions: -# recreate_database_$DATABASE_TYPE -# install_database_$DATABASE_TYPE -# configure_database_$DATABASE_TYPE -# database_connection_url_$DATABASE_TYPE +# +# - recreate_database_$DATABASE_TYPE +# - install_database_$DATABASE_TYPE +# - configure_database_$DATABASE_TYPE +# - database_connection_url_$DATABASE_TYPE # # and call register_database $DATABASE_TYPE @@ -22,7 +23,9 @@ set +o xtrace # Register a database backend -# $1 The name of the database backend +# +# $1 The name of the database backend +# # This is required to be defined before the specific database scripts are sourced function register_database { [ -z "$DATABASE_BACKENDS" ] && DATABASE_BACKENDS=$1 || DATABASE_BACKENDS+=" $1" diff --git a/lib/neutron b/lib/neutron index 44fb9e1005..00852df05a 100644 --- a/lib/neutron +++ b/lib/neutron @@ -208,7 +208,7 @@ source $TOP_DIR/lib/neutron_plugins/services/loadbalancer source $TOP_DIR/lib/neutron_plugins/services/vpn # Firewall Service Plugin functions -# -------------------------------- +# --------------------------------- source $TOP_DIR/lib/neutron_plugins/services/firewall # Use security group or not @@ -494,6 +494,7 @@ function _configure_neutron_common() { # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. # For addition plugin config files, set ``Q_PLUGIN_EXTRA_CONF_PATH``, # ``Q_PLUGIN_EXTRA_CONF_FILES``. For example: + # # ``Q_PLUGIN_EXTRA_CONF_FILES=(file1, file2)`` neutron_plugin_configure_common diff --git a/lib/swift b/lib/swift index 8726f1e7fc..3c3b8b1d38 100644 --- a/lib/swift +++ b/lib/swift @@ -268,8 +268,8 @@ function configure_swift() { # By default Swift will be installed with keystone and tempauth middleware # and add the swift3 middleware if its configured for it. 
The token for - # tempauth would be prefixed with the reseller_prefix setting TEMPAUTH_ the - # token for keystoneauth would have the standard reseller_prefix AUTH_ + # tempauth would be prefixed with the reseller_prefix setting `TEMPAUTH_` the + # token for keystoneauth would have the standard reseller_prefix `AUTH_` if is_service_enabled swift3;then swift_pipeline=" swift3 s3token " fi diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index 2c45568531..3d9f76f4a5 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -84,11 +84,10 @@ if [ ! -r $CACHEDIR/$DIST_NAME-base.img ]; then $TOOLS_DIR/get_uec_image.sh $DIST_NAME $CACHEDIR/$DIST_NAME-base.img fi -# Finds the next available NBD device -# Exits script if error connecting or none free +# Finds and returns full device path for the next available NBD device. +# Exits script if error connecting or none free. # map_nbd image -# Returns full nbd device path -function map_nbd { +function map_nbd() { for i in `seq 0 15`; do if [ ! -e /sys/block/nbd$i/pid ]; then NBD=/dev/nbd$i @@ -156,7 +155,7 @@ if [ ! -r $IMG_FILE ]; then # Pre-create the image file # FIXME(dt): This should really get the partition size to - # pre-create the image file + # pre-create the image file dd if=/dev/zero of=$IMG_FILE_TMP bs=1 count=1 seek=$((2*1024*1024*1024)) # Create filesystem image for RAM disk dd if=${NBD}p1 of=$IMG_FILE_TMP bs=1M diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh index 2251d1e67c..50f6592a3a 100755 --- a/tools/create-stack-user.sh +++ b/tools/create-stack-user.sh @@ -5,7 +5,9 @@ # Create a user account suitable for running DevStack # - create a group named $STACK_USER if it does not exist # - create a user named $STACK_USER if it does not exist +# # - home is $DEST +# # - configure sudo for $STACK_USER # ``stack.sh`` was never intended to run as root. 
It had a hack to do what is diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 9e65b7c21e..325a6d6be1 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -5,11 +5,15 @@ # fixup_stuff.sh # # All distro and package specific hacks go in here +# # - prettytable 0.7.2 permissions are 600 in the package and # pip 1.4 doesn't fix it (1.3 did) +# # - httplib2 0.8 permissions are 600 in the package and # pip 1.4 doesn't fix it (1.3 did) +# # - RHEL6: +# # - set selinux not enforcing # - (re)start messagebus daemon # - remove distro packages python-crypto and python-lxml @@ -90,7 +94,7 @@ if [[ $DISTRO =~ (rhel6) ]]; then # fresh system via Anaconda and the dependency chain # ``cas`` -> ``python-paramiko`` -> ``python-crypto``. # ``pip uninstall pycrypto`` will remove the packaged ``.egg-info`` - # file but leave most of the actual library files behind in + # file but leave most of the actual library files behind in # ``/usr/lib64/python2.6/Crypto``. Later ``pip install pycrypto`` # will install over the packaged files resulting # in a useless mess of old, rpm-packaged files and pip-installed files. From 6730a9d1c67a8740611c972aad1e3d2c5feebebb Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 24 Oct 2013 15:28:34 +0000 Subject: [PATCH 0209/4438] Handle the CM service availability in tempest This patch handle the ceilometer service availability in tempest. 
Change-Id: Ib0d1d7b858ff327785ebbcc27d7f920fb4a32444 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 8e4e5210ea..b3df1398cc 100644 --- a/lib/tempest +++ b/lib/tempest @@ -296,7 +296,7 @@ function configure_tempest() { iniset $TEMPEST_CONF cli cli_dir $NOVA_BIN_DIR # service_available - for service in nova cinder glance neutron swift heat horizon ; do + for service in nova cinder glance neutron swift heat horizon ceilometer; do if is_service_enabled $service ; then iniset $TEMPEST_CONF service_available $service "True" else From 1ed64cbbf09d76df0b1ce0d5095373c2bf1053c6 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 23 Oct 2013 10:37:05 +0200 Subject: [PATCH 0210/4438] Ensure cm-api is ready before start cm-alarm The patch ensure that ceilometer-api is ready before starting the ceilometer-alarm-evaluator service. This ensure that ceilometer-alarm-evaluator doesn't log a error message on startup due to not yet available ceilometer-api. Closes bug: #1243249 Change-Id: Icff3e972ec485f26c014071f68079593a14b7240 --- lib/ceilometer | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/ceilometer b/lib/ceilometer index cd4c4d8656..06f215e9d8 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -138,6 +138,12 @@ function start_ceilometer() { screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF" screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF" screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" + + echo "Waiting for ceilometer-api to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then + die $LINENO "ceilometer-api did not start" + fi + screen_it ceilometer-alarm-notifier "cd ; ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" screen_it ceilometer-alarm-evaluator "cd ; ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF" } From 6fa67c99ba687f659fab0ad3f965993d833ca2b4 Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Thu, 24 Oct 2013 16:32:21 +0100 Subject: [PATCH 0211/4438] git-ignore files generated by "./tools/build_docs.sh ." Change-Id: Ibf190998e52e7814ddc7f7ab4cf174aee28df9bf --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 798b0814c9..0c22c6b62a 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,5 @@ stack-screenrc accrc .stackenv .prereqs +docs/ +docs-files From 6a5aa7c6a20435bbd276a0f1823396b52a8f0daf Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Thu, 24 Oct 2013 11:27:02 +0100 Subject: [PATCH 0212/4438] Fix some Markdown formatting issues Address miscellaneous issues with Markdown formatting in comments which are consumed by shocco when generating the online documentation. 
Change-Id: I953075cdbddbf1f119c6c7e35f039e2e54b79078 --- exercises/aggregates.sh | 13 ++--- exercises/boot_from_volume.sh | 5 +- functions | 21 ++++---- lib/apache | 26 +++++----- lib/baremetal | 78 ++++++++++++++++-------------- lib/ceilometer | 20 ++++---- lib/cinder | 30 +++++++----- lib/database | 7 +-- lib/databases/mysql | 3 +- lib/databases/postgresql | 3 +- lib/glance | 30 ++++++------ lib/heat | 27 ++++++----- lib/horizon | 30 ++++++------ lib/infra | 14 +++--- lib/ironic | 31 ++++++------ lib/keystone | 40 +++++++-------- lib/ldap | 10 ++-- lib/neutron | 37 +++++++------- lib/nova | 34 +++++++------ lib/nova_plugins/hypervisor-docker | 8 +-- lib/oslo | 12 +++-- lib/rpc_backend | 20 ++++---- lib/savanna-dashboard | 13 ++--- lib/swift | 37 +++++++------- lib/tempest | 61 ++++++++++++----------- lib/template | 26 +++++----- lib/tls | 36 ++++++++------ lib/trove | 7 +-- 28 files changed, 367 insertions(+), 312 deletions(-) diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index e5fc7dec84..96241f9b34 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -3,12 +3,13 @@ # **aggregates.sh** # This script demonstrates how to use host aggregates: -# * Create an Aggregate -# * Updating Aggregate details -# * Testing Aggregate metadata -# * Testing Aggregate delete -# * Testing General Aggregates (https://blueprints.launchpad.net/nova/+spec/general-host-aggregates) -# * Testing add/remove hosts (with one host) +# +# * Create an Aggregate +# * Updating Aggregate details +# * Testing Aggregate metadata +# * Testing Aggregate delete +# * Testing General Aggregates (https://blueprints.launchpad.net/nova/+spec/general-host-aggregates) +# * Testing add/remove hosts (with one host) echo "**************************************************" echo "Begin DevStack Exercise: $0" diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 634a6d526c..3b3d3ba63b 100755 --- a/exercises/boot_from_volume.sh +++ 
b/exercises/boot_from_volume.sh @@ -3,8 +3,9 @@ # **boot_from_volume.sh** # This script demonstrates how to boot from a volume. It does the following: -# * Create a bootable volume -# * Boot a volume-backed instance +# +# * Create a bootable volume +# * Boot a volume-backed instance echo "*********************************************************************" echo "Begin DevStack Exercise: $0" diff --git a/functions b/functions index 8d076b72e5..f4fb8065db 100644 --- a/functions +++ b/functions @@ -1,16 +1,17 @@ # functions - Common functions used by DevStack components # # The following variables are assumed to be defined by certain functions: -# ``ENABLED_SERVICES`` -# ``ERROR_ON_CLONE`` -# ``FILES`` -# ``GLANCE_HOSTPORT`` -# ``OFFLINE`` -# ``PIP_DOWNLOAD_CACHE`` -# ``PIP_USE_MIRRORS`` -# ``RECLONE`` -# ``TRACK_DEPENDS`` -# ``http_proxy``, ``https_proxy``, ``no_proxy`` +# +# - ``ENABLED_SERVICES`` +# - ``ERROR_ON_CLONE`` +# - ``FILES`` +# - ``GLANCE_HOSTPORT`` +# - ``OFFLINE`` +# - ``PIP_DOWNLOAD_CACHE`` +# - ``PIP_USE_MIRRORS`` +# - ``RECLONE`` +# - ``TRACK_DEPENDS`` +# - ``http_proxy``, ``https_proxy``, ``no_proxy`` # Save trace setting diff --git a/lib/apache b/lib/apache index 3a1f6f1263..41d6fcc381 100644 --- a/lib/apache +++ b/lib/apache @@ -2,15 +2,16 @@ # Functions to control configuration and operation of apache web server # Dependencies: -# ``functions`` file -# is_apache_enabled_service -# install_apache_wsgi -# config_apache_wsgi -# enable_apache_site -# disable_apache_site -# start_apache_server -# stop_apache_server -# restart_apache_server +# +# - ``functions`` file +# - is_apache_enabled_service +# - install_apache_wsgi +# - config_apache_wsgi +# - enable_apache_site +# - disable_apache_site +# - start_apache_server +# - stop_apache_server +# - restart_apache_server # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -116,6 +117,7 @@ function restart_apache_server() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# 
Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/baremetal b/lib/baremetal index 5606230eac..a0df85e700 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -1,19 +1,19 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2012 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +## vim: tabstop=4 shiftwidth=4 softtabstop=4 + +## Copyright (c) 2012 Hewlett-Packard Development Company, L.P. +## All Rights Reserved. +## +## Licensed under the Apache License, Version 2.0 (the "License"); you may +## not use this file except in compliance with the License. You may obtain +## a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +## License for the specific language governing permissions and limitations +## under the License. # This file provides devstack with the environment and utilities to @@ -24,7 +24,8 @@ # control physical hardware resources on the same network, if you know # the MAC address(es) and IPMI credentials. 
# -# At a minimum, to enable the baremetal driver, you must set these in loclarc: +# At a minimum, to enable the baremetal driver, you must set these in localrc: +# # VIRT_DRIVER=baremetal # ENABLED_SERVICES="$ENABLED_SERVICES,baremetal" # @@ -38,11 +39,13 @@ # Below that, various functions are defined, which are called by devstack # in the following order: # -# before nova-cpu starts: +# before nova-cpu starts: +# # - prepare_baremetal_toolchain # - configure_baremetal_nova_dirs # -# after nova and glance have started: +# after nova and glance have started: +# # - build_and_upload_baremetal_deploy_k_and_r $token # - create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID # - upload_baremetal_image $url $token @@ -58,11 +61,13 @@ set +o xtrace # ------------------- # sub-driver to use for kernel deployment -# - nova.virt.baremetal.pxe.PXE -# - nova.virt.baremetal.tilera.TILERA +# +# - nova.virt.baremetal.pxe.PXE +# - nova.virt.baremetal.tilera.TILERA BM_DRIVER=${BM_DRIVER:-nova.virt.baremetal.pxe.PXE} # sub-driver to use for remote power management +# # - nova.virt.baremetal.fake.FakePowerManager, for manual power control # - nova.virt.baremetal.ipmi.IPMI, for remote IPMI # - nova.virt.baremetal.tilera_pdu.Pdu, for TilePro hardware @@ -84,11 +89,11 @@ BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-} # dedicated to baremetal. When enable this, make sure these conditions are # fulfilled: # -# 1) nova-compute and nova-network runs on the same host -# 2) nova-network uses FlatDHCPManager +# 1) nova-compute and nova-network runs on the same host +# 2) nova-network uses FlatDHCPManager # # NOTE: the other BM_DNSMASQ_* have no effect on the behavior if this option -# is enabled. +# is enabled. 
BM_DNSMASQ_FROM_NOVA_NETWORK=`trueorfalse False $BM_DNSMASQ_FROM_NOVA_NETWORK` # BM_DNSMASQ_IFACE should match FLAT_NETWORK_BRIDGE @@ -105,9 +110,9 @@ fi # BM_DNSMASQ_DNS provide dns server to bootstrap clients BM_DNSMASQ_DNS=${BM_DNSMASQ_DNS:-} -# BM_FIRST_MAC *must* be set to the MAC address of the node you will boot. -# This is passed to dnsmasq along with the kernel/ramdisk to -# deploy via PXE. +# BM_FIRST_MAC *must* be set to the MAC address of the node you will +# boot. This is passed to dnsmasq along with the kernel/ramdisk to +# deploy via PXE. BM_FIRST_MAC=${BM_FIRST_MAC:-} # BM_SECOND_MAC is only important if the host has >1 NIC. @@ -121,9 +126,9 @@ BM_PM_ADDR=${BM_PM_ADDR:-0.0.0.0} BM_PM_USER=${BM_PM_USER:-user} BM_PM_PASS=${BM_PM_PASS:-pass} -# BM_FLAVOR_* options are arbitrary and not necessarily related to physical -# hardware capacity. These can be changed if you are testing -# BaremetalHostManager with multiple nodes and different flavors. +# BM_FLAVOR_* options are arbitrary and not necessarily related to +# physical hardware capacity. These can be changed if you are testing +# BaremetalHostManager with multiple nodes and different flavors. BM_CPU_ARCH=${BM_CPU_ARCH:-x86_64} BM_FLAVOR_CPU=${BM_FLAVOR_CPU:-1} BM_FLAVOR_RAM=${BM_FLAVOR_RAM:-1024} @@ -288,8 +293,8 @@ function create_baremetal_flavor() { } -# pull run-time kernel/ramdisk out of disk image and load into glance -# note that $file is currently expected to be in qcow2 format +# Pull run-time kernel/ramdisk out of disk image and load into glance. +# Note that $file is currently expected to be in qcow2 format. # Sets KERNEL_ID and RAMDISK_ID # # Usage: extract_and_upload_k_and_r_from_image $token $file @@ -432,7 +437,7 @@ function clear_baremetal_of_all_nodes() { done } -# inform nova-baremetal about nodes, MACs, etc +# Inform nova-baremetal about nodes, MACs, etc. 
# Defaults to using BM_FIRST_MAC and BM_SECOND_MAC if parameters not specified # # Usage: add_baremetal_node @@ -461,6 +466,7 @@ function add_baremetal_node() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/ceilometer b/lib/ceilometer index a471d9c7e6..9257611fc6 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -10,6 +10,7 @@ # enable_service ceilometer-alarm-notifier ceilometer-alarm-evaluator # Dependencies: +# # - functions # - OS_AUTH_URL for auth in api # - DEST set to the destination directory @@ -18,12 +19,12 @@ # stack.sh # --------- -# install_ceilometer -# configure_ceilometer -# init_ceilometer -# start_ceilometer -# stop_ceilometer -# cleanup_ceilometer +# - install_ceilometer +# - configure_ceilometer +# - init_ceilometer +# - start_ceilometer +# - stop_ceilometer +# - cleanup_ceilometer # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -156,6 +157,7 @@ function stop_ceilometer() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/cinder b/lib/cinder index f6f137cabd..ae0e28c544 100644 --- a/lib/cinder +++ b/lib/cinder @@ -2,19 +2,20 @@ # Install and start **Cinder** volume service # Dependencies: +# # - functions # - DEST, DATA_DIR, STACK_USER must be defined -# SERVICE_{TENANT_NAME|PASSWORD} must be defined -# ``KEYSTONE_TOKEN_FORMAT`` must be defined +# - SERVICE_{TENANT_NAME|PASSWORD} must be defined +# - ``KEYSTONE_TOKEN_FORMAT`` must be defined # stack.sh # --------- -# install_cinder -# configure_cinder -# init_cinder -# start_cinder -# stop_cinder -# cleanup_cinder +# - install_cinder +# - configure_cinder +# - init_cinder +# - start_cinder +# - stop_cinder +# - cleanup_cinder # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -82,7 +83,8 @@ 
VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} # Functions # --------- # _clean_lvm_lv removes all cinder LVM volumes -# _clean_lvm_lv $VOLUME_GROUP $VOLUME_NAME_PREFIX +# +# Usage: _clean_lvm_lv $VOLUME_GROUP $VOLUME_NAME_PREFIX function _clean_lvm_lv() { local vg=$1 local lv_prefix=$2 @@ -98,7 +100,8 @@ function _clean_lvm_lv() { # _clean_lvm_backing_file() removes the backing file of the # volume group used by cinder -# _clean_lvm_backing_file() $VOLUME_GROUP +# +# Usage: _clean_lvm_backing_file() $VOLUME_GROUP function _clean_lvm_backing_file() { local vg=$1 @@ -546,6 +549,7 @@ function stop_cinder() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/database b/lib/database index c3fd435eb2..0661049e70 100644 --- a/lib/database +++ b/lib/database @@ -124,6 +124,7 @@ function database_connection_url { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/databases/mysql b/lib/databases/mysql index 41e3236f69..0eb8fdd7a2 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -2,7 +2,8 @@ # Functions to control the configuration and operation of the **MySQL** database backend # Dependencies: -# DATABASE_{HOST,USER,PASSWORD} must be defined +# +# - DATABASE_{HOST,USER,PASSWORD} must be defined # Save trace setting MY_XTRACE=$(set +o | grep xtrace) diff --git a/lib/databases/postgresql b/lib/databases/postgresql index b173772170..519479ad68 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -2,7 +2,8 @@ # Functions to control the configuration and operation of the **PostgreSQL** database backend # Dependencies: -# DATABASE_{HOST,USER,PASSWORD} must be defined +# +# - DATABASE_{HOST,USER,PASSWORD} must be defined # Save trace setting PG_XTRACE=$(set +o | grep xtrace) diff 
--git a/lib/glance b/lib/glance index 75e3dd053d..eb727f1e2a 100644 --- a/lib/glance +++ b/lib/glance @@ -2,20 +2,21 @@ # Functions to control the configuration and operation of the **Glance** service # Dependencies: -# ``functions`` file -# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined -# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined -# ``SERVICE_HOST`` -# ``KEYSTONE_TOKEN_FORMAT`` must be defined +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# - ``SERVICE_HOST`` +# - ``KEYSTONE_TOKEN_FORMAT`` must be defined # ``stack.sh`` calls the entry points in this order: # -# install_glance -# configure_glance -# init_glance -# start_glance -# stop_glance -# cleanup_glance +# - install_glance +# - configure_glance +# - init_glance +# - start_glance +# - stop_glance +# - cleanup_glance # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -209,6 +210,7 @@ function stop_glance() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/heat b/lib/heat index 8acadb4ad1..bf4d4bce9c 100644 --- a/lib/heat +++ b/lib/heat @@ -2,21 +2,23 @@ # Install and start **Heat** service # To enable, add the following to localrc -# ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng +# +# ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng # Dependencies: +# # - functions # stack.sh # --------- -# install_heatclient -# install_heat -# configure_heatclient -# configure_heat -# init_heat -# start_heat -# stop_heat -# cleanup_heat +# - install_heatclient +# - install_heat +# - configure_heatclient +# - configure_heat +# - init_heat +# - start_heat +# - stop_heat +# - cleanup_heat # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -198,6 +200,7 @@ function disk_image_create { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: 
+# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/horizon b/lib/horizon index 63caf3c0f2..af0db49da5 100644 --- a/lib/horizon +++ b/lib/horizon @@ -1,21 +1,20 @@ # lib/horizon # Functions to control the configuration and operation of the horizon service -# # Dependencies: -# ``functions`` file -# ``apache`` file -# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined -# +# +# - ``functions`` file +# - ``apache`` file +# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # ``stack.sh`` calls the entry points in this order: # -# install_horizon -# configure_horizon -# init_horizon -# start_horizon -# stop_horizon -# cleanup_horizon +# - install_horizon +# - configure_horizon +# - init_horizon +# - start_horizon +# - stop_horizon +# - cleanup_horizon # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -25,8 +24,6 @@ set +o xtrace # Defaults # -------- -# - # Set up default directories HORIZON_DIR=$DEST/horizon @@ -183,6 +180,7 @@ function stop_horizon() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/infra b/lib/infra index 0b732598ff..0dcf0ad980 100644 --- a/lib/infra +++ b/lib/infra @@ -5,12 +5,13 @@ # requirements as a global list # Dependencies: -# ``functions`` file +# +# - ``functions`` file # ``stack.sh`` calls the entry points in this order: # -# unfubar_setuptools -# install_infra +# - unfubar_setuptools +# - install_infra # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -51,6 +52,7 @@ function install_infra() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/ironic b/lib/ironic index 649c1c2cd6..ff99e58c19 100644 --- a/lib/ironic +++ b/lib/ironic @@ -2,21 +2,21 @@ # Functions to control the configuration and operation of 
the **Ironic** service # Dependencies: -# ``functions`` file -# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined -# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined -# ``SERVICE_HOST`` -# ``KEYSTONE_TOKEN_FORMAT`` must be defined +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# - ``SERVICE_HOST`` +# - ``KEYSTONE_TOKEN_FORMAT`` must be defined # ``stack.sh`` calls the entry points in this order: # -# install_ironic -# install_ironicclient -# configure_ironic -# init_ironic -# start_ironic -# stop_ironic -# cleanup_ironic +# - install_ironic +# - install_ironicclient +# - init_ironic +# - start_ironic +# - stop_ironic +# - cleanup_ironic # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -225,6 +225,7 @@ function stop_ironic() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/keystone b/lib/keystone index beddb1cd75..7011f66e99 100755 --- a/lib/keystone +++ b/lib/keystone @@ -2,25 +2,26 @@ # Functions to control the configuration and operation of **Keystone** # Dependencies: -# ``functions`` file -# ``DEST``, ``STACK_USER`` -# ``IDENTITY_API_VERSION`` -# ``BASE_SQL_CONN`` -# ``SERVICE_HOST``, ``SERVICE_PROTOCOL`` -# ``SERVICE_TOKEN`` -# ``S3_SERVICE_PORT`` (template backend only) +# +# - ``functions`` file +# - ``DEST``, ``STACK_USER`` +# - ``IDENTITY_API_VERSION`` +# - ``BASE_SQL_CONN`` +# - ``SERVICE_HOST``, ``SERVICE_PROTOCOL`` +# - ``SERVICE_TOKEN`` +# - ``S3_SERVICE_PORT`` (template backend only) # ``stack.sh`` calls the entry points in this order: # -# install_keystone -# configure_keystone -# _config_keystone_apache_wsgi -# init_keystone -# start_keystone -# create_keystone_accounts -# stop_keystone -# cleanup_keystone -# _cleanup_keystone_apache_wsgi +# - install_keystone +# - configure_keystone +# - 
_config_keystone_apache_wsgi +# - init_keystone +# - start_keystone +# - create_keystone_accounts +# - stop_keystone +# - cleanup_keystone +# - _cleanup_keystone_apache_wsgi # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -393,6 +394,7 @@ function stop_keystone() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/ldap b/lib/ldap index 2a24ccddf7..80992a7a09 100644 --- a/lib/ldap +++ b/lib/ldap @@ -2,7 +2,8 @@ # Functions to control the installation and configuration of **ldap** # ``lib/keystone`` calls the entry points in this order: -# install_ldap() +# +# - install_ldap() # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -91,6 +92,7 @@ function clear_ldap_state() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/neutron b/lib/neutron index 00852df05a..50bdb74217 100644 --- a/lib/neutron +++ b/lib/neutron @@ -7,24 +7,24 @@ # ``stack.sh`` calls the entry points in this order: # -# install_neutron -# install_neutronclient -# install_neutron_agent_packages -# install_neutron_third_party -# configure_neutron -# init_neutron -# configure_neutron_third_party -# init_neutron_third_party -# start_neutron_third_party -# create_nova_conf_neutron -# start_neutron_service_and_check -# create_neutron_initial_network -# setup_neutron_debug -# start_neutron_agents +# - install_neutron +# - install_neutronclient +# - install_neutron_agent_packages +# - install_neutron_third_party +# - configure_neutron +# - init_neutron +# - configure_neutron_third_party +# - init_neutron_third_party +# - start_neutron_third_party +# - create_nova_conf_neutron +# - start_neutron_service_and_check +# - create_neutron_initial_network +# - setup_neutron_debug +# - start_neutron_agents # # ``unstack.sh`` calls the entry 
points in this order: # -# stop_neutron +# - stop_neutron # Functions in lib/neutron are classified into the following categories: # @@ -891,6 +891,7 @@ function stop_neutron_third_party() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/nova b/lib/nova index 809f8e5896..98b32ead18 100644 --- a/lib/nova +++ b/lib/nova @@ -2,22 +2,23 @@ # Functions to control the configuration and operation of the **Nova** service # Dependencies: -# ``functions`` file -# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined -# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined -# ``LIBVIRT_TYPE`` must be defined -# ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined -# ``KEYSTONE_TOKEN_FORMAT`` must be defined +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# - ``LIBVIRT_TYPE`` must be defined +# - ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined +# - ``KEYSTONE_TOKEN_FORMAT`` must be defined # ``stack.sh`` calls the entry points in this order: # -# install_nova -# configure_nova -# create_nova_conf -# init_nova -# start_nova -# stop_nova -# cleanup_nova +# - install_nova +# - configure_nova +# - create_nova_conf +# - init_nova +# - start_nova +# - stop_nova +# - cleanup_nova # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -683,6 +684,7 @@ function stop_nova() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index 427554b7db..300522fb48 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -2,11 +2,13 @@ # Configure the Docker hypervisor # Enable with: -# VIRT_DRIVER=docker +# +# 
VIRT_DRIVER=docker # Dependencies: -# ``functions`` file -# ``nova`` and ``glance`` configurations +# +# - ``functions`` file +# - ``nova`` and ``glance`` configurations # install_nova_hypervisor - install any external requirements # configure_nova_hypervisor - make configuration changes, including those to other services diff --git a/lib/oslo b/lib/oslo index f77a4fa941..816ae9a48a 100644 --- a/lib/oslo +++ b/lib/oslo @@ -6,11 +6,12 @@ # pre-released versions of oslo libraries. # Dependencies: -# ``functions`` file +# +# - ``functions`` file # ``stack.sh`` calls the entry points in this order: # -# install_oslo +# - install_oslo # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -52,6 +53,7 @@ function cleanup_oslo() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/rpc_backend b/lib/rpc_backend index a323d649a7..ae83e85e89 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -3,15 +3,16 @@ # rpc backend settings # Dependencies: -# ``functions`` file -# ``RABBIT_{HOST|PASSWORD}`` must be defined when RabbitMQ is used +# +# - ``functions`` file +# - ``RABBIT_{HOST|PASSWORD}`` must be defined when RabbitMQ is used # ``stack.sh`` calls the entry points in this order: # -# check_rpc_backend -# install_rpc_backend -# restart_rpc_backend -# iniset_rpc_backend +# - check_rpc_backend +# - install_rpc_backend +# - restart_rpc_backend +# - iniset_rpc_backend # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -200,6 +201,7 @@ function qpid_is_supported() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard index 9562db4e1c..e96762285c 100644 --- a/lib/savanna-dashboard +++ b/lib/savanna-dashboard @@ -1,15 +1,16 @@ # lib/savanna-dashboard # Dependencies: -# 
``functions`` file -# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined -# ``SERVICE_HOST +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# - ``SERVICE_HOST`` # ``stack.sh`` calls the entry points in this order: # -# install_savanna_dashboard -# configure_savanna_dashboard -# cleanup_savanna_dashboard +# - install_savanna_dashboard +# - configure_savanna_dashboard +# - cleanup_savanna_dashboard # Save trace setting XTRACE=$(set +o | grep xtrace) diff --git a/lib/swift b/lib/swift index 3c3b8b1d38..db6ae18bef 100644 --- a/lib/swift +++ b/lib/swift @@ -2,22 +2,24 @@ # Functions to control the configuration and operation of the **Swift** service # Dependencies: -# ``functions`` file -# ``apache`` file -# ``DEST``, ``SCREEN_NAME``, `SWIFT_HASH` must be defined -# ``STACK_USER`` must be defined -# ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined -# ``lib/keystone`` file +# +# - ``functions`` file +# - ``apache`` file +# - ``DEST``, ``SCREEN_NAME``, `SWIFT_HASH` must be defined +# - ``STACK_USER`` must be defined +# - ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined +# - ``lib/keystone`` file +# # ``stack.sh`` calls the entry points in this order: # -# install_swift -# _config_swift_apache_wsgi -# configure_swift -# init_swift -# start_swift -# stop_swift -# cleanup_swift -# _cleanup_swift_apache_wsgi +# - install_swift +# - _config_swift_apache_wsgi +# - configure_swift +# - init_swift +# - start_swift +# - stop_swift +# - cleanup_swift +# - _cleanup_swift_apache_wsgi # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -617,6 +619,7 @@ function stop_swift() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/tempest b/lib/tempest index 8e4e5210ea..25814b6250 100644 --- a/lib/tempest +++ b/lib/tempest @@ -2,34 +2,38 @@ # Install and configure Tempest # Dependencies: -# ``functions`` file 
-# ``lib/nova`` service is running -# -# - ``DEST``, ``FILES`` -# - ``ADMIN_PASSWORD`` -# - ``DEFAULT_IMAGE_NAME`` -# - ``S3_SERVICE_PORT`` -# - ``SERVICE_HOST`` -# - ``BASE_SQL_CONN`` ``lib/database`` declares -# - ``PUBLIC_NETWORK_NAME`` -# - ``Q_USE_NAMESPACE`` -# - ``Q_ROUTER_NAME`` -# - ``VIRT_DRIVER`` -# - ``LIBVIRT_TYPE`` -# - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone +# +# - ``functions`` file +# - ``lib/nova`` service is running +# - Global vars that are assumed to be defined: +# - ``DEST``, ``FILES`` +# - ``ADMIN_PASSWORD`` +# - ``DEFAULT_IMAGE_NAME`` +# - ``S3_SERVICE_PORT`` +# - ``SERVICE_HOST`` +# - ``BASE_SQL_CONN`` ``lib/database`` declares +# - ``PUBLIC_NETWORK_NAME`` +# - ``Q_USE_NAMESPACE`` +# - ``Q_ROUTER_NAME`` +# - ``VIRT_DRIVER`` +# - ``LIBVIRT_TYPE`` +# - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone +# # Optional Dependencies: -# ALT_* (similar vars exists in keystone_data.sh) -# ``LIVE_MIGRATION_AVAILABLE`` -# ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION`` -# ``DEFAULT_INSTANCE_TYPE`` -# ``DEFAULT_INSTANCE_USER`` -# ``CINDER_MULTI_LVM_BACKEND`` -# ``HEAT_CREATE_TEST_IMAGE`` +# +# - ``ALT_*`` (similar vars exists in keystone_data.sh) +# - ``LIVE_MIGRATION_AVAILABLE`` +# - ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION`` +# - ``DEFAULT_INSTANCE_TYPE`` +# - ``DEFAULT_INSTANCE_USER`` +# - ``CINDER_MULTI_LVM_BACKEND`` +# - ``HEAT_CREATE_TEST_IMAGE`` +# # ``stack.sh`` calls the entry points in this order: # -# install_tempest -# configure_tempest -# init_tempest +# - install_tempest +# - configure_tempest +# - init_tempest # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -345,6 +349,7 @@ function init_tempest() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/template b/lib/template index 72904fe1c6..629e110271 100644 --- a/lib/template +++ b/lib/template 
@@ -3,18 +3,19 @@ # # Dependencies: -# ``functions`` file -# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined -# +# +# - ``functions`` file +# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# - # ``stack.sh`` calls the entry points in this order: # -# install_XXXX -# configure_XXXX -# init_XXXX -# start_XXXX -# stop_XXXX -# cleanup_XXXX +# - install_XXXX +# - configure_XXXX +# - init_XXXX +# - start_XXXX +# - stop_XXXX +# - cleanup_XXXX # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -79,6 +80,7 @@ function stop_XXXX() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/tls b/lib/tls index f7dcffa32d..a1a7fddc18 100644 --- a/lib/tls +++ b/lib/tls @@ -1,24 +1,27 @@ # lib/tls # Functions to control the configuration and operation of the TLS proxy service -# Dependencies: # !! source _before_ any services that use ``SERVICE_HOST`` -# ``functions`` file -# ``DEST``, ``DATA_DIR`` must be defined -# ``HOST_IP``, ``SERVICE_HOST`` -# ``KEYSTONE_TOKEN_FORMAT`` must be defined +# +# Dependencies: +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR`` must be defined +# - ``HOST_IP``, ``SERVICE_HOST`` +# - ``KEYSTONE_TOKEN_FORMAT`` must be defined # Entry points: -# configure_CA -# init_CA +# +# - configure_CA +# - init_CA -# configure_proxy -# start_tls_proxy +# - configure_proxy +# - start_tls_proxy -# make_root_ca -# make_int_ca -# new_cert $INT_CA_DIR int-server "abc" -# start_tls_proxy HOST_IP 5000 localhost 5000 +# - make_root_ca +# - make_int_ca +# - new_cert $INT_CA_DIR int-server "abc" +# - start_tls_proxy HOST_IP 5000 localhost 5000 # Defaults @@ -321,6 +324,7 @@ function start_tls_proxy() { } -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/trove b/lib/trove index 0a19d031ac..c40006bf5d 100644 --- a/lib/trove 
+++ b/lib/trove @@ -181,6 +181,7 @@ function stop_trove() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: From 4b2c5ed209514534a0ead54cd3c91fc3b42d6194 Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Thu, 24 Oct 2013 17:40:13 +0100 Subject: [PATCH 0213/4438] only clone docs from gh-pages branch if they aren't there Allow the caller of this script to decide which commit from the gh-pages branch should be used to build the docs. This also avoids excessive repeated git clones during development. Change-Id: I3e58eef0ac03b15903c06d5632c0eb41413db02c --- tools/build_docs.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/build_docs.sh b/tools/build_docs.sh index 216e557025..1c145e237f 100755 --- a/tools/build_docs.sh +++ b/tools/build_docs.sh @@ -91,8 +91,9 @@ fi # Assumption is we are now in the DevStack repo workspace to be processed # Pull the latest docs branch from devstack.org repo -rm -rf docs || true -git clone -b gh-pages $GH_PAGES_REPO docs +if ! [ -d docs ]; then + git clone -b gh-pages $GH_PAGES_REPO docs +fi # Build list of scripts to process FILES="" 
Change-Id: I2b842ecc3f6e8b917dd721729640000bd7e7fb78 --- functions | 15 ++++++++++++--- stackrc | 3 +++ 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/functions b/functions index c707e556c6..bece5a4a56 100644 --- a/functions +++ b/functions @@ -1257,9 +1257,18 @@ function setup_develop() { # ensure that further actions can do things like setup.py sdist safe_chown -R $STACK_USER $1/*.egg-info - # Undo requirements changes, if we made them - if [ $update_requirements -eq 0 ]; then - (cd $project_dir && git reset --hard) + # We've just gone and possibly modified the user's source tree in an + # automated way, which is considered bad form if it's a development + # tree because we've screwed up their next git checkin. So undo it. + # + # However... there are some circumstances, like running in the gate + # where we really really want the overridden version to stick. So provide + # a variable that tells us whether or not we should UNDO the requirements + # changes (this will be set to False in the OpenStack ci gate) + if [ $UNDO_REQUIREMENTS = "True" ]; then + if [ $update_requirements -eq 0 ]; then + (cd $project_dir && git reset --hard) + fi fi } diff --git a/stackrc b/stackrc index 0151672c1d..7069327dd7 100644 --- a/stackrc +++ b/stackrc @@ -297,6 +297,9 @@ SCREEN_NAME=${SCREEN_NAME:-stack} # Do not install packages tagged with 'testonly' by default INSTALL_TESTONLY_PACKAGES=${INSTALL_TESTONLY_PACKAGES:-False} +# Undo requirements changes by global requirements +UNDO_REQUIREMENTS=${UNDO_REQUIREMENTS:-True} + # Local variables: # mode: shell-script # End: From 5a77d03addeab27704eeeb6a43ea75c1a9941764 Mon Sep 17 00:00:00 2001 From: Sam Alba Date: Mon, 21 Oct 2013 16:17:30 -0700 Subject: [PATCH 0215/4438] Removed docker version requirement Change-Id: I9f04db46706bf6453def09ca1e22e0db918d811b Closes-Bug: #1237581 --- lib/nova_plugins/hypervisor-docker | 3 +-- tools/docker/install_docker.sh | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git 
a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index 427554b7db..70b1b6bdc5 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -37,7 +37,6 @@ DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-http://get.docker.io/images/opens DOCKER_REGISTRY_IMAGE_NAME=docker-registry DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME} -DOCKER_PACKAGE_VERSION=${DOCKER_PACKAGE_VERSION:-0.6.1} DOCKER_APT_REPO=${DOCKER_APT_REPO:-https://get.docker.io/ubuntu} @@ -72,7 +71,7 @@ function install_nova_hypervisor() { fi # Make sure Docker is installed - if ! is_package_installed lxc-docker-${DOCKER_PACKAGE_VERSION}; then + if ! is_package_installed lxc-docker; then die $LINENO "Docker is not installed. Please run tools/docker/install_docker.sh" fi diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh index 483955bfc2..2e5b510c41 100755 --- a/tools/docker/install_docker.sh +++ b/tools/docker/install_docker.sh @@ -38,7 +38,7 @@ curl https://get.docker.io/gpg | sudo apt-key add - install_package python-software-properties && \ sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list" apt_get update -install_package --force-yes lxc-docker-${DOCKER_PACKAGE_VERSION} socat +install_package --force-yes lxc-docker socat # Start the daemon - restart just in case the package ever auto-starts... restart_service docker From 62e8a30abd9d2504bfca1c1c1c72151d729cc9c8 Mon Sep 17 00:00:00 2001 From: Peter Feiner Date: Thu, 24 Oct 2013 17:49:00 -0400 Subject: [PATCH 0216/4438] Fix apache horizon permissions on Ubuntu 13.10 Fixes bug 1241574. 
Change-Id: If6c30874267a6bf30c114146f83b2d3220f32c1a --- lib/horizon | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/horizon b/lib/horizon index 63caf3c0f2..80f8df71f4 100644 --- a/lib/horizon +++ b/lib/horizon @@ -123,6 +123,11 @@ function init_horizon() { # Be a good citizen and use the distro tools here sudo touch $horizon_conf sudo a2ensite horizon.conf + if [[ "$DISTRO" == "saucy" ]]; then + # Ubuntu 13.10 has Require all denied in apache2.conf + # and requires explicit Require all granted + HORIZON_REQUIRE='Require all granted' + fi elif is_fedora; then if [[ "$os_RELEASE" -ge "18" ]]; then # fedora 18 has Require all denied in its httpd.conf From 20150864eb3ea1fbaa616f6a6cc022dc9bc13c14 Mon Sep 17 00:00:00 2001 From: Sam Alba Date: Thu, 24 Oct 2013 17:09:40 -0700 Subject: [PATCH 0217/4438] Removed dependency from dotcloud repos for the Nova docker driver Since the Nova driver is in Nova core from the Havana release, this will just clean the docker nova driver install. Change-Id: Ic98012b8b5e54e727a1b11f4d32f6623d2067621 --- lib/nova_plugins/hypervisor-docker | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index 427554b7db..2451982ed9 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -24,7 +24,6 @@ set +o xtrace # Set up default directories DOCKER_DIR=$DEST/docker -DOCKER_REPO=${DOCKER_REPO:-https://github.com/dotcloud/openstack-docker.git} DOCKER_BRANCH=${DOCKER_BRANCH:-master} DOCKER_UNIX_SOCKET=/var/run/docker.sock @@ -54,10 +53,6 @@ function cleanup_nova_hypervisor() { # configure_nova_hypervisor - Set config files, create data dirs, etc function configure_nova_hypervisor() { - git_clone $DOCKER_REPO $DOCKER_DIR $DOCKER_BRANCH - - ln -snf ${DOCKER_DIR}/nova-driver $NOVA_DIR/nova/virt/docker - iniset $NOVA_CONF DEFAULT compute_driver docker.DockerDriver iniset $GLANCE_API_CONF DEFAULT container_formats 
ami,ari,aki,bare,ovf,docker From 4540d00ebdd3283de47d6546b6e7575e2e9041ff Mon Sep 17 00:00:00 2001 From: Aaron Rosen Date: Thu, 24 Oct 2013 13:59:33 -0700 Subject: [PATCH 0218/4438] All neutron plugins should now use LibvirtGenericVIFDriver Change-Id: I70015ae55fe6db9c6c4663a8d021fe9cfe2eddcf --- lib/neutron | 4 ++++ lib/neutron_plugins/bigswitch_floodlight | 2 +- lib/neutron_plugins/linuxbridge_agent | 2 +- lib/neutron_plugins/nicira | 1 - lib/neutron_plugins/ovs_base | 2 +- lib/neutron_plugins/plumgrid | 3 +-- 6 files changed, 8 insertions(+), 6 deletions(-) diff --git a/lib/neutron b/lib/neutron index 44fb9e1005..3f39d33f41 100644 --- a/lib/neutron +++ b/lib/neutron @@ -110,6 +110,10 @@ Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} # The name of the default q-l3 router Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} +# nova vif driver that all plugins should use +NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + + # List of config file names in addition to the main plugin config file # See _configure_neutron_common() for details about setting it up declare -a Q_PLUGIN_EXTRA_CONF_FILES diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index 24507312c7..93ec497bb9 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -9,7 +9,7 @@ source $TOP_DIR/lib/neutron_plugins/ovs_base source $TOP_DIR/lib/neutron_thirdparty/bigswitch_floodlight # for third party service specific configuration values function neutron_plugin_create_nova_conf() { - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + : } function neutron_plugin_install_agent_packages() { diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index 88c49c5b5e..85e8c085be 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent 
@@ -11,7 +11,7 @@ function is_neutron_ovs_base_plugin() { } function neutron_plugin_create_nova_conf() { - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + : } function neutron_plugin_install_agent_packages() { diff --git a/lib/neutron_plugins/nicira b/lib/neutron_plugins/nicira index 7c99b692d6..87d3c3d17b 100644 --- a/lib/neutron_plugins/nicira +++ b/lib/neutron_plugins/nicira @@ -26,7 +26,6 @@ function is_neutron_ovs_base_plugin() { } function neutron_plugin_create_nova_conf() { - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtOpenVswitchDriver"} # if n-cpu is enabled, then setup integration bridge if is_service_enabled n-cpu; then setup_integration_bridge diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 1214f3bcbd..89db29d07f 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -73,7 +73,7 @@ function _neutron_ovs_base_configure_l3_agent() { } function _neutron_ovs_base_configure_nova_vif_driver() { - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + : } # Restore xtrace diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid index 9d3c92ff51..d4050bb951 100644 --- a/lib/neutron_plugins/plumgrid +++ b/lib/neutron_plugins/plumgrid @@ -9,8 +9,7 @@ set +o xtrace #source $TOP_DIR/lib/neutron_plugins/ovs_base function neutron_plugin_create_nova_conf() { - - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + : } function neutron_plugin_setup_interface_driver() { From 246d9bbd4a13cc2848411eda81eac8b311850717 Mon Sep 17 00:00:00 2001 From: Thomas Maddox Date: Thu, 24 Oct 2013 18:57:40 +0000 Subject: [PATCH 0219/4438] Add PostgreSQL option for Ceilometer backend Adds the PostgreSQL case for a Ceilometer backend and initializes Ceilometer accordingly. 
Closes-Bug: #1244381 Change-Id: Iefc5bb7eea6e9efa1f2ad04f1f8dc714e3404c9c --- lib/ceilometer | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index cd4c4d8656..a79ca55183 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -91,7 +91,7 @@ function configure_ceilometer() { iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $CEILOMETER_CONF keystone_authtoken signing_dir $CEILOMETER_AUTH_CACHE_DIR - if [[ "$CEILOMETER_BACKEND" = 'mysql' ]]; then + if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then iniset $CEILOMETER_CONF database connection `database_connection_url ceilometer` else iniset $CEILOMETER_CONF database connection mongodb://localhost:27017/ceilometer @@ -116,7 +116,7 @@ function init_ceilometer() { sudo chown $STACK_USER $CEILOMETER_AUTH_CACHE_DIR rm -f $CEILOMETER_AUTH_CACHE_DIR/* - if [[ "$CEILOMETER_BACKEND" = 'mysql' ]]; then + if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then recreate_database ceilometer utf8 $CEILOMETER_BIN_DIR/ceilometer-dbsync fi From 1ce2ffd15fbb9423cd5f705e10d34dee5e23a4d5 Mon Sep 17 00:00:00 2001 From: "Joe H. Rahme" Date: Tue, 22 Oct 2013 15:19:09 +0200 Subject: [PATCH 0220/4438] Enables Swift crossdomain middleware by default This step is needed to enable its testing in Tempest. The patch adds a variable SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH that holds a list of middlewares inserted in the pipeline before authentication middlewares (tempauth, keystoneauth, ...). Change-Id: I1927103feff997a354ccf82ccf12aa77db083ad3 --- lib/swift | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index 6ab43c420f..2b23e44d78 100644 --- a/lib/swift +++ b/lib/swift @@ -72,6 +72,10 @@ SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb} # the end of the pipeline. 
SWIFT_EXTRAS_MIDDLEWARE_LAST=${SWIFT_EXTRAS_MIDDLEWARE_LAST} +# Set ``SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH`` to extras middlewares that need to be at +# the beginning of the pipeline, before authentication middlewares. +SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH=${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH:-crossdomain} + # The ring uses a configurable number of bits from a path’s MD5 hash as # a partition index that designates a device. The number of bits kept # from the hash is known as the partition power, and 2 to the partition @@ -210,7 +214,7 @@ function _config_swift_apache_wsgi() { # configure_swift() - Set config files, create data dirs and loop image function configure_swift() { - local swift_pipeline=" " + local swift_pipeline="${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH}" local node_number local swift_node_config local swift_log_dir @@ -271,7 +275,7 @@ function configure_swift() { # tempauth would be prefixed with the reseller_prefix setting TEMPAUTH_ the # token for keystoneauth would have the standard reseller_prefix AUTH_ if is_service_enabled swift3;then - swift_pipeline=" swift3 s3token " + swift_pipeline+=" swift3 s3token " fi swift_pipeline+=" authtoken keystoneauth tempauth " sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER} @@ -283,6 +287,9 @@ function configure_swift() { iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix "TEMPAUTH" + # Configure Crossdomain + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:crossdomain use "egg:swift#crossdomain" + # Configure Keystone sed -i '/^# \[filter:authtoken\]/,/^# \[filter:keystoneauth\]$/ s/^#[ \t]*//' ${SWIFT_CONFIG_PROXY_SERVER} iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST From 388e36c98bf3efae3a1e25eabdd291f0b5b5e7c9 Mon Sep 17 00:00:00 2001 From: Peter Feiner Date: Thu, 24 Oct 2013 18:51:44 -0400 Subject: [PATCH 0221/4438] zsh openrc compatibility 
Replacing $BASH_SOURCE with ${BASH_SOURCE:-$0} makes devstack zsh friendly: in bash, $BASH_SOURCE is used per usual; in zsh, where $BASH_SOURCE isn't defined, $0 is used, which, unlike in bash, evaluates to the current source file. Now you can source devstack's openrc from a zsh shell. Tested with bash and zsh from directories other than the root devstack directory. Change-Id: Iab1a817b15d86144163b5094bb58f94b15c598a0 --- eucarc | 2 +- openrc | 2 +- stackrc | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/eucarc b/eucarc index 2b0f7dd143..350235106c 100644 --- a/eucarc +++ b/eucarc @@ -13,7 +13,7 @@ if [[ -n "$2" ]]; then fi # Find the other rc files -RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) +RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) # Get user configuration source $RC_DIR/openrc diff --git a/openrc b/openrc index 3de7e3958f..5344d247bd 100644 --- a/openrc +++ b/openrc @@ -18,7 +18,7 @@ if [[ -n "$2" ]]; then fi # Find the other rc files -RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) +RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) # Import common functions source $RC_DIR/functions diff --git a/stackrc b/stackrc index 0151672c1d..4da481e98e 100644 --- a/stackrc +++ b/stackrc @@ -1,7 +1,7 @@ # stackrc # # Find the other rc files -RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) +RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) # Destination path for installation DEST=/opt/stack From f7cfa0c6e7a965949441ded6a789e12e5bc58039 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 25 Oct 2013 13:26:17 -0400 Subject: [PATCH 0222/4438] put the stackforge library installation behind a conditional we don't actually want to drag in stackforge libraries all the time, instead ensure that we have enabled stackforge_libs before doing it. 
Change-Id: Ic1c2e3d19e106a2aa0db9725d16a8b207546c23d --- stack.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 45604da140..6de7599bbf 100755 --- a/stack.sh +++ b/stack.sh @@ -631,7 +631,9 @@ install_infra install_oslo # Install stackforge libraries for testing -install_stackforge +if is_service_enabled stackforge_libs; then + install_stackforge +fi # Install clients libraries install_keystoneclient From f470d95b907b91e2879e389ea75dbdb1cb525b74 Mon Sep 17 00:00:00 2001 From: Chuck Short Date: Fri, 25 Oct 2013 15:08:44 -0700 Subject: [PATCH 0223/4438] Enable trusty Enable Trusty Tahir (14.04) the next Ubuntu LTS release. Change-Id: I48fe52345fefaf9ac7ba4be7d3f5675f72eea754 Signed-off-by: Chuck Short --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 6de7599bbf..36312ea619 100755 --- a/stack.sh +++ b/stack.sh @@ -131,7 +131,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|7.0|wheezy|sid|testing|jessie|f16|f17|f18|f19|opensuse-12.2|rhel6) ]]; then +if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f16|f17|f18|f19|opensuse-12.2|rhel6) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From e4a523f543e59d92ab227b5bcfaad09fc171b6a9 Mon Sep 17 00:00:00 2001 From: Joe Mills Date: Mon, 28 Oct 2013 07:38:55 +0000 Subject: [PATCH 0224/4438] Cleanup unused settings, use interface_driver setter * Remove unused MIDONET specific settings from nova.conf * Remove unused MIDONET specific settings from dhcp_agent.ini * Move the interface_driver settings to the proper setting function so that it can be used by lbaas_agent.ini as well. 
Change-Id: Id686ff5f55db00bce42b8a2bd56e7655c73211fb Closes-Bug: #1245401 --- lib/neutron_plugins/midonet | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index cf45a9d11d..e406146bbe 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -32,23 +32,10 @@ function neutron_plugin_configure_debug_command() { function neutron_plugin_configure_dhcp_agent() { DHCP_DRIVER=${DHCP_DRIVER:-"neutron.plugins.midonet.agent.midonet_driver.DhcpNoOpDriver"} - DHCP_INTERFACE_DRIVER=${DHCP_INTEFACE_DRIVER:-"neutron.plugins.midonet.agent.midonet_driver.MidonetInterfaceDriver"} + neutron_plugin_setup_interface_driver $Q_DHCP_CONF_FILE iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_driver $DHCP_DRIVER - iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver $DHCP_INTERFACE_DRIVER iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces True iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True - if [[ "$MIDONET_API_URI" != "" ]]; then - iniset $Q_DHCP_CONF_FILE MIDONET midonet_uri "$MIDONET_API_URI" - fi - if [[ "$MIDONET_USERNAME" != "" ]]; then - iniset $Q_DHCP_CONF_FILE MIDONET username "$MIDONET_USERNAME" - fi - if [[ "$MIDONET_PASSWORD" != "" ]]; then - iniset $Q_DHCP_CONF_FILE MIDONET password "$MIDONET_PASSWORD" - fi - if [[ "$MIDONET_PROJECT_ID" != "" ]]; then - iniset $Q_DHCP_CONF_FILE MIDONET project_id "$MIDONET_PROJECT_ID" - fi } function neutron_plugin_configure_l3_agent() { @@ -78,8 +65,8 @@ function neutron_plugin_configure_service() { } function neutron_plugin_setup_interface_driver() { - # May change in the future - : + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.MidonetInterfaceDriver } function has_neutron_plugin_security_group() { From 8787e0fd0c4b2ec29e6016a13e913a9ef3ac5444 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Mon, 28 Oct 2013 18:15:57 +0000 Subject: [PATCH 0225/4438] xenapi: always reset JeOS's 
network If the JeOS template contained an exotic network configuration, the VM prep step might fail. This patch resets the networking of the VM before starting it. Fixes bug 1245607 Change-Id: I921f1fdd0709d7a7760c4bb165e32f3898098bff --- tools/xen/install_os_domU.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 9a2f5a8c03..33dc26f1bb 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -271,6 +271,12 @@ set_vm_memory "$GUEST_NAME" "$OSDOMU_MEM_MB" # Max out VCPU count for better performance max_vcpus "$GUEST_NAME" +# Wipe out all network cards +destroy_all_vifs_of "$GUEST_NAME" + +# Add only one interface to prepare the guest template +add_interface "$GUEST_NAME" "$MGT_BRIDGE_OR_NET_NAME" "0" + # start the VM to run the prepare steps xe vm-start vm="$GUEST_NAME" From b245c5d21c4eee77e4649e0115d579b7e28c9851 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 30 Oct 2013 15:11:08 -0400 Subject: [PATCH 0226/4438] Set Nova auth_proto regardless of tls-proxy. We actually want to comment out the keystone auth_proto in Nova's api-paste.ini file regardless of the tls-proxy setting. Likewise lets always set it in nova.conf as well. This should fix an issue in trying to get this in: https://review.openstack.org/#/c/52825/ Change-Id: I1b8202aa1666cbb6ca13d2f77d50fa0175969266 --- lib/nova | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/lib/nova b/lib/nova index 31f286d943..150025f35b 100644 --- a/lib/nova +++ b/lib/nova @@ -221,9 +221,7 @@ function configure_nova() { # Comment out the keystone configs in Nova's api-paste.ini. # We are using nova.conf to configure this instead. 
inicomment $NOVA_API_PASTE_INI filter:authtoken auth_host - if is_service_enabled tls-proxy; then - inicomment $NOVA_API_PASTE_INI filter:authtoken auth_protocol - fi + inicomment $NOVA_API_PASTE_INI filter:authtoken auth_protocol inicomment $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name inicomment $NOVA_API_PASTE_INI filter:authtoken admin_user inicomment $NOVA_API_PASTE_INI filter:authtoken admin_password @@ -397,9 +395,7 @@ function create_nova_conf() { # Add keystone authtoken configuration iniset $NOVA_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST - if is_service_enabled tls-proxy; then - iniset $NOVA_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - fi + iniset $NOVA_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL iniset $NOVA_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $NOVA_CONF keystone_authtoken admin_user nova iniset $NOVA_CONF keystone_authtoken admin_password $SERVICE_PASSWORD From 24f6efadbdef558655abc5f1052bb61ae87b55f0 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 31 Oct 2013 10:27:58 -0400 Subject: [PATCH 0227/4438] Add FORCE_CONFIG_DRIVE and make it the default Adds a new FORCE_CONFIG_DRIVE option to lib/nova which is by default enabled. Using config drive should speed things up a bit and is a more likely production default instead of file injection. Change-Id: I2388ef0df12a6289b619bfaf30cb952fcc48ef41 --- lib/nova | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/nova b/lib/nova index e9fbd560b4..8ef5d9a427 100644 --- a/lib/nova +++ b/lib/nova @@ -63,6 +63,10 @@ NOVA_ROOTWRAP=$(get_rootwrap_location nova) # NOTE: Set API_RATE_LIMIT="False" to turn OFF rate limiting API_RATE_LIMIT=${API_RATE_LIMIT:-"True"} +# Option to enable/disable config drive +# NOTE: Set FORCE_CONFIG_DRIVE="False" to turn OFF config drive +FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"always"} + # Nova supports pluggable schedulers. The default ``FilterScheduler`` # should work in most cases. 
SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler} @@ -428,6 +432,9 @@ function create_nova_conf() { if [ "$API_RATE_LIMIT" != "True" ]; then iniset $NOVA_CONF DEFAULT api_rate_limit "False" fi + if [ "$FORCE_CONFIG_DRIVE" != "False" ]; then + iniset $NOVA_CONF DEFAULT force_config_drive "$FORCE_CONFIG_DRIVE" + fi # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then setup_colorized_logging $NOVA_CONF DEFAULT From 861463fa591be56b5936777539d2349abc1cea00 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 31 Oct 2013 11:08:49 -0400 Subject: [PATCH 0228/4438] Remove docker exercise No other hypervisor has a dedicated exercise, docker should be tested just by testing the nova APIs, not on it's own. Change-Id: Ifc788815380e8502449171410dea8260786a1e79 --- exercises/docker.sh | 104 -------------------------------------------- 1 file changed, 104 deletions(-) delete mode 100755 exercises/docker.sh diff --git a/exercises/docker.sh b/exercises/docker.sh deleted file mode 100755 index 10c5436c35..0000000000 --- a/exercises/docker.sh +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env bash - -# **docker** - -# Test Docker hypervisor - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. 
-set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# Skip if the hypervisor is not Docker -[[ "$VIRT_DRIVER" == "docker" ]] || exit 55 - -# Import docker functions and declarations -source $TOP_DIR/lib/nova_plugins/hypervisor-docker - -# Image and flavor are ignored but the CLI requires them... - -# Instance type to create -DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} - -# Boot this image, use first AMI image if unset -DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} - -# Instance name -VM_NAME=ex-docker - - -# Launching a server -# ================== - -# Grab the id of the image to launch -IMAGE=$(glance image-list | egrep " $DOCKER_IMAGE_NAME:latest " | get_field 1) -die_if_not_set $LINENO IMAGE "Failure getting image $DOCKER_IMAGE_NAME" - -# Select a flavor -INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) -if [[ -z "$INSTANCE_TYPE" ]]; then - # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) -fi - -# Clean-up from previous runs -nova delete $VM_NAME || true -if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then - die $LINENO "server didn't terminate!" -fi - -# Boot instance -# ------------- - -VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE $VM_NAME | grep ' id ' | get_field 2) -die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" - -# Check that the status is active within ACTIVE_TIMEOUT seconds -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - die $LINENO "server didn't become active!" 
-fi - -# Get the instance IP -IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) -die_if_not_set $LINENO IP "Failure retrieving IP address" - -# Private IPs can be pinged in single node deployments -ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT - -# Clean up -# -------- - -# Delete instance -nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME" -if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - die $LINENO "Server $VM_NAME not deleted" -fi - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" From 1143f7e45fd2760b8d5fecc8fbd598078ba92fd3 Mon Sep 17 00:00:00 2001 From: Jeff Peeler Date: Thu, 31 Oct 2013 16:21:52 -0400 Subject: [PATCH 0229/4438] Turn off Nova firewall driver when using Neutron As referenced in the OpenStack documentation, the firewall_driver in nova.conf should be set to nova.virt.firewall.NoopFirewallDriver, "so that nova-compute does not perform iptables-based filtering itself". Without this change, the driver gets set to nova.virt.libvirt.firewall.IptablesFirewallDriver, which seems to make networking unusable. 
Change-Id: Id9cce0f5f4efe719683aaf3284b128188b61b919 Closes-bug: #1246888 --- lib/neutron | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/neutron b/lib/neutron index 9834b59f66..098a589592 100644 --- a/lib/neutron +++ b/lib/neutron @@ -272,6 +272,7 @@ function create_nova_conf_neutron() { if [[ "$Q_USE_SECGROUP" == "True" ]]; then LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver + iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER iniset $NOVA_CONF DEFAULT security_group_api neutron fi From d561b70930f7184ade05953faa11a47dc250a16c Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 22 Oct 2013 17:46:00 -0500 Subject: [PATCH 0230/4438] Set keystone.conf to mode 0600 Set keystone.conf readable only by owner Fixes CVE-2013-1977 Fixed bug: 1168252 Change-Id: Idd13b7a58e257565052c54f72c65d8dceb23f27a --- lib/keystone | 1 + 1 file changed, 1 insertion(+) mode change 100755 => 100644 lib/keystone diff --git a/lib/keystone b/lib/keystone old mode 100755 new mode 100644 index 7011f66e99..4353ebab1c --- a/lib/keystone +++ b/lib/keystone @@ -126,6 +126,7 @@ function configure_keystone() { if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF + chmod 600 $KEYSTONE_CONF cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR if [[ -f "$KEYSTONE_DIR/etc/keystone-paste.ini" ]]; then cp -p "$KEYSTONE_DIR/etc/keystone-paste.ini" "$KEYSTONE_PASTE_INI" From 89d41ca23941d864ac9b5665cd68423311029236 Mon Sep 17 00:00:00 2001 From: DennyZhang Date: Fri, 1 Nov 2013 15:41:01 -0500 Subject: [PATCH 0231/4438] Fix wrong variable comparision in functions A whitespace is missing. 
Closes-Bug: #1247273 Change-Id: I4bbed6077813c5d7522650baf2e78d9c44c1bba4 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 0a73b9f0ac..197b4cca17 100644 --- a/functions +++ b/functions @@ -1266,7 +1266,7 @@ function setup_develop() { # where we really really want the overridden version to stick. So provide # a variable that tells us whether or not we should UNDO the requirements # changes (this will be set to False in the OpenStack ci gate) - if [ $UNDO_REQUIREMENTS = "True"]; then + if [ $UNDO_REQUIREMENTS = "True" ]; then if [ $update_requirements -eq 0 ]; then (cd $project_dir && git reset --hard) fi From 86199fcdfead581a03b453484879d046729a0fcd Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 23 Oct 2013 02:54:53 -0700 Subject: [PATCH 0232/4438] Clean up use of global conf vars in start_nova Make it clear which services run in the API cell and compute (child) cells by using appropriately named local variables for the conf files. This should help save from future bugs. Also: When cells is enabled, there's no need to run nova-conductor in the API cell right now. Cells bypasses any use of conductor in the API cell. 
Change-Id: I3af17d3db028f5df36814cb83c7db4de8f141f84 --- lib/nova | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/lib/nova b/lib/nova index 8ef5d9a427..e734550b33 100644 --- a/lib/nova +++ b/lib/nova @@ -645,32 +645,32 @@ function start_nova_compute() { # start_nova() - Start running processes, including screen function start_nova_rest() { - NOVA_CONF_BOTTOM=$NOVA_CONF - - # ``screen_it`` checks ``is_service_enabled``, it is not needed here - screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor" - + local api_cell_conf=$NOVA_CONF if is_service_enabled n-cell; then - NOVA_CONF_BOTTOM=$NOVA_CELLS_CONF - screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CELLS_CONF" - screen_it n-cell-region "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CONF" - screen_it n-cell-child "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CELLS_CONF" + local compute_cell_conf=$NOVA_CELLS_CONF + else + local compute_cell_conf=$NOVA_CONF fi - screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" - screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network --config-file $NOVA_CONF_BOTTOM" - screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $NOVA_CONF_BOTTOM" - screen_it n-api-meta "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api-metadata --config-file $NOVA_CONF_BOTTOM" + # ``screen_it`` checks ``is_service_enabled``, it is not needed here + screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf" + screen_it n-cell-region "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf" + screen_it n-cell-child "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf" + + screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf" + screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf" + screen_it n-sch "cd 
$NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" + screen_it n-api-meta "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf" - screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $NOVA_CONF --web $NOVNC_DIR" - screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF" - screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $NOVA_CONF --web $SPICE_DIR" - screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth" + screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_DIR" + screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf" + screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_DIR" + screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf" # Starting the nova-objectstore only if swift3 service is not enabled. # Swift will act as s3 objectstore. is_service_enabled swift3 || \ - screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore" + screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore --config-file $api_cell_conf" } function start_nova() { @@ -683,7 +683,7 @@ function stop_nova() { # Kill the nova screen windows # Some services are listed here twice since more than one instance # of a service may be running in certain configs. 
- for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cond n-cell n-cell n-api-meta; do + for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta; do screen -S $SCREEN_NAME -p $serv -X kill done if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then From e8fa8537a577aeceef70f3d651522705df4353c5 Mon Sep 17 00:00:00 2001 From: DennyZhang Date: Sun, 3 Nov 2013 12:22:04 -0600 Subject: [PATCH 0233/4438] enhance logging for depend tracking mode Add some extra log, to make the console output more understandable Change-Id: If80cb9dba1031ad268bf9b6266ca83f93071a2ad --- stack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/stack.sh b/stack.sh index 36312ea619..f6c4824ea8 100755 --- a/stack.sh +++ b/stack.sh @@ -757,6 +757,7 @@ fi if [[ $TRACK_DEPENDS = True ]]; then $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then + echo "Detect some changes for installed packages of pip, in depend tracking mode" cat $DEST/requires.diff fi echo "Ran stack.sh in depend tracking mode, bailing out now" From eca0a1c7b901e9602c7df89c32b35402360d22f6 Mon Sep 17 00:00:00 2001 From: Paul Czarkowski Date: Tue, 29 Oct 2013 17:15:22 -0500 Subject: [PATCH 0234/4438] removed unecessary lines for docker install * script was trying to copy file from a source that doesn't exist to destination that does. * variable DOCKER_BRANCH no longer used. 
Change-Id: I50fd55e97dfc4eaaa3c1f2bfc42e8cfc0e76cdf4 --- lib/nova_plugins/hypervisor-docker | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index 2451982ed9..ac509c59c2 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -24,7 +24,6 @@ set +o xtrace # Set up default directories DOCKER_DIR=$DEST/docker -DOCKER_BRANCH=${DOCKER_BRANCH:-master} DOCKER_UNIX_SOCKET=/var/run/docker.sock DOCKER_PID_FILE=/var/run/docker.pid @@ -55,8 +54,6 @@ function cleanup_nova_hypervisor() { function configure_nova_hypervisor() { iniset $NOVA_CONF DEFAULT compute_driver docker.DockerDriver iniset $GLANCE_API_CONF DEFAULT container_formats ami,ari,aki,bare,ovf,docker - - sudo cp -p ${DOCKER_DIR}/nova-driver/docker.filters $NOVA_CONF_DIR/rootwrap.d } # install_nova_hypervisor() - Install external components From 610af8cfa7ff9cadac80fdd37924ecd8fe0d546e Mon Sep 17 00:00:00 2001 From: Chris Buccella Date: Tue, 5 Nov 2013 12:56:34 +0000 Subject: [PATCH 0235/4438] Fix horizon config under Apache 2.4 Apache 2.4 now uses mod_authz_host for acces control. Horizon's Apache config needs an update to allow access to its directory, otherwise a 403 will be returned. This change replaces a similar previous fixes done for Fedora 18 and Ubuntu 13.10, since this is an Apache version issue, not a distro-specific one. Change-Id: Iecc17600d8e1aae6a7b0929b1493d712c307616f Closes-Bug: #1243075 --- functions | 12 ++++++++++++ lib/horizon | 15 +++++---------- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/functions b/functions index 0a73b9f0ac..5ca5eee925 100644 --- a/functions +++ b/functions @@ -557,6 +557,18 @@ function is_arch { [ "($uname -m)" = "$ARCH_TYPE" ] } +# Checks if installed Apache is <= given version +# $1 = x.y.z (version string of Apache) +function check_apache_version { + local cmd="apachectl" + if ! 
[[ -x $(which apachectl 2>/dev/null) ]]; then + cmd="/usr/sbin/apachectl" + fi + + local version=$($cmd -v | grep version | grep -Po 'Apache/\K[^ ]*') + expr "$version" '>=' $1 > /dev/null +} + # git clone only if directory doesn't exist already. Since ``DEST`` might not # be owned by the installation user, we create the directory and change the # ownership to the proper user. diff --git a/lib/horizon b/lib/horizon index c116ec257a..4cb2828f10 100644 --- a/lib/horizon +++ b/lib/horizon @@ -112,7 +112,12 @@ function init_horizon() { # Create an empty directory that apache uses as docroot sudo mkdir -p $HORIZON_DIR/.blackhole + # Apache 2.4 uses mod_authz_host for access control now (instead of "Allow") HORIZON_REQUIRE='' + if check_apache_version "2.4" ; then + HORIZON_REQUIRE='Require all granted' + fi + local horizon_conf=/etc/$APACHE_NAME/$APACHE_CONF_DIR/horizon.conf if is_ubuntu; then # Clean up the old config name @@ -120,17 +125,7 @@ function init_horizon() { # Be a good citizen and use the distro tools here sudo touch $horizon_conf sudo a2ensite horizon.conf - if [[ "$DISTRO" == "saucy" ]]; then - # Ubuntu 13.10 has Require all denied in apache2.conf - # and requires explicit Require all granted - HORIZON_REQUIRE='Require all granted' - fi elif is_fedora; then - if [[ "$os_RELEASE" -ge "18" ]]; then - # fedora 18 has Require all denied in its httpd.conf - # and requires explicit Require all granted - HORIZON_REQUIRE='Require all granted' - fi sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf elif is_suse; then : # nothing to do From 237225dd394d43e17c5406c8c0549e3e7d5e4cd2 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Wed, 6 Nov 2013 15:41:04 +0100 Subject: [PATCH 0236/4438] Fixing space issues between " and ] When we are using '[' aka. ``test`` , before the closing ']' we need to use a space. Otherwise the commands return with '2' so the "expression" will be a constant false. 
Change-Id: I673762e802c28335e03390b6608cf6bbee6aaba6 --- functions | 2 +- tools/build_ramdisk.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/functions b/functions index 0a73b9f0ac..197b4cca17 100644 --- a/functions +++ b/functions @@ -1266,7 +1266,7 @@ function setup_develop() { # where we really really want the overridden version to stick. So provide # a variable that tells us whether or not we should UNDO the requirements # changes (this will be set to False in the OpenStack ci gate) - if [ $UNDO_REQUIREMENTS = "True"]; then + if [ $UNDO_REQUIREMENTS = "True" ]; then if [ $update_requirements -eq 0 ]; then (cd $project_dir && git reset --hard) fi diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index 3d9f76f4a5..737255578a 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -22,7 +22,7 @@ cleanup() { umount $MNTDIR rmdir $MNTDIR fi - if [ -n "$DEV_FILE_TMP" -a -e "$DEV_FILE_TMP "]; then + if [ -n "$DEV_FILE_TMP" -a -e "$DEV_FILE_TMP" ]; then rm -f $DEV_FILE_TMP fi if [ -n "$IMG_FILE_TMP" -a -e "$IMG_FILE_TMP" ]; then From c04ddbe8680dd91d2749e74b36728aee27036dea Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 6 Nov 2013 02:15:11 -0600 Subject: [PATCH 0237/4438] Robustify shocco install and config * shocco has some non-optional prereqs, make sure they are present if shocco is being installed * set the path to installed shocco correctly * add the working dir to .gitignore Change-Id: If786ea9e28d3595775f7b86d2fe760dff8047f49 --- .gitignore | 1 + tools/build_docs.sh | 13 +++++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 0c22c6b62a..a3d5b0d02a 100644 --- a/.gitignore +++ b/.gitignore @@ -13,5 +13,6 @@ stack-screenrc accrc .stackenv .prereqs +devstack-docs-* docs/ docs-files diff --git a/tools/build_docs.sh b/tools/build_docs.sh index 216e557025..8dca524f9f 100755 --- a/tools/build_docs.sh +++ b/tools/build_docs.sh @@ -28,22 +28,31 @@ 
MASTER_BRANCH=${MASTER_BRANCH:-master} # http://devstack.org is a GitHub gh-pages site in the https://github.com/cloudbuilders/devtack.git repo GH_PAGES_REPO=git@github.com:cloudbuilders/devstack.git +# Keep track of the devstack directory +TOP_DIR=$(cd $(dirname "$0")/.. && pwd) + # Uses this shocco branch: https://github.com/dtroyer/shocco/tree/rst_support SHOCCO=${SHOCCO:-shocco} if ! which shocco; then - if [[ ! -x shocco/shocco ]]; then + if [[ ! -x $TOP_DIR/shocco/shocco ]]; then if [[ -z "$INSTALL_SHOCCO" ]]; then echo "shocco not found in \$PATH, please set environment variable SHOCCO" exit 1 fi echo "Installing local copy of shocco" + if ! which pygmentize; then + sudo pip install Pygments + fi + if ! which rst2html.py; then + sudo pip install docutils + fi git clone -b rst_support https://github.com/dtroyer/shocco shocco cd shocco ./configure make cd .. fi - SHOCCO=shocco/shocco + SHOCCO=$TOP_DIR/shocco/shocco fi # Process command-line args From 5a35e73b3419df571f55efd2a68ef565469e3d1b Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Tue, 29 Oct 2013 08:23:43 +0100 Subject: [PATCH 0238/4438] Option for installing spice-html5 and novnc from packages The novnc and spice-html5 is installed from git repository by default, but not from an openstack* repository. In order to add vnc/spice proxy related gating tests they should be installed from packages. 
New boolean variables added to control the installation source: NOVNC_FROM_PACKAGE and SPICE_FROM_PACKAGE Related changes: https://review.openstack.org/#/c/51790/ https://review.openstack.org/#/c/50822/ Change-Id: I1e55fd99edd30876924a13160afb74ff3e97c485 --- lib/nova | 29 ++++++++++++++++++++++++----- stack.sh | 10 ---------- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/lib/nova b/lib/nova index 00f977d3db..eacd41599d 100644 --- a/lib/nova +++ b/lib/nova @@ -73,9 +73,6 @@ SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler} QEMU_CONF=/etc/libvirt/qemu.conf -NOVNC_DIR=$DEST/noVNC -SPICE_DIR=$DEST/spice-html5 - # Set default defaults here as some hypervisor drivers override these PUBLIC_INTERFACE_DEFAULT=br100 GUEST_INTERFACE_DEFAULT=eth0 @@ -590,6 +587,28 @@ function install_nova() { install_nova_hypervisor fi + if is_service_enabled n-novnc; then + # a websockets/html5 or flash powered VNC console for vm instances + if trueorfalse True "$NOVNC_FROM_PACKAGE"; then + NOVNC_WEB_DIR=/usr/share/novnc + install_package novnc + else + NOVNC_WEB_DIR=$DEST/noVNC + git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH + fi + fi + + if is_service_enabled n-spice; then + # a websockets/html5 or flash powered SPICE console for vm instances + if trueorfalse True "$SPICE_FROM_PACKAGE"; then + SPICE_WEB_DIR=/usr/share/spice-html5 + install_package spice-html5 + else + SPICE_WEB_DIR=$DEST/spice-html5 + git_clone $SPICE_REPO $SPICE_WEB_DIR $SPICE_BRANCH + fi + fi + git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH setup_develop $NOVA_DIR sudo install -D -m 0644 -o $STACK_USER {$NOVA_DIR/tools/,/etc/bash_completion.d/}nova-manage.bash_completion @@ -658,9 +677,9 @@ function start_nova_rest() { screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" screen_it n-api-meta "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf" - screen_it n-novnc "cd $NOVA_DIR && 
$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_DIR" + screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR" screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf" - screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_DIR" + screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR" screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf" # Starting the nova-objectstore only if swift3 service is not enabled. diff --git a/stack.sh b/stack.sh index 36312ea619..47d93bd642 100755 --- a/stack.sh +++ b/stack.sh @@ -694,16 +694,6 @@ if is_service_enabled nova; then configure_nova fi -if is_service_enabled n-novnc; then - # a websockets/html5 or flash powered VNC console for vm instances - git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH -fi - -if is_service_enabled n-spice; then - # a websockets/html5 or flash powered SPICE console for vm instances - git_clone $SPICE_REPO $SPICE_DIR $SPICE_BRANCH -fi - if is_service_enabled horizon; then # dashboard install_horizon From feb28837f4db9177835f94b6b9899a90c45a685d Mon Sep 17 00:00:00 2001 From: Ryan Hsu Date: Thu, 7 Nov 2013 12:12:35 -0800 Subject: [PATCH 0239/4438] Add new stack phase: post-extra The current existing phases "post-config" and "extra" are not sufficient to allow local.conf overrides to extra type services because they run after the services are installed and configured. This commit introduces a new phase called "post-extra" that runs after these existing phases. With this change, users are able to leverage local.conf to provide overridden options to services like Tempest. 
Change-Id: I5d758eebfda804dd1d8cbc3d5cc35ef4dcc8c96f Closes-Bug: #1249085 --- README.md | 1 + extras.d/80-tempest.sh | 3 +++ extras.d/README.md | 4 ++-- stack.sh | 7 +++++++ 4 files changed, 13 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 640fab65f9..34cd2efc90 100644 --- a/README.md +++ b/README.md @@ -326,6 +326,7 @@ The defined phases are: * **local** - extracts ``localrc`` from ``local.conf`` before ``stackrc`` is sourced * **post-config** - runs after the layer 2 services are configured and before they are started * **extra** - runs after services are started and before any files in ``extra.d`` are executed +* **post-extra** - runs after files in ``extra.d`` are executed The file is processed strictly in sequence; meta-sections may be specified more than once but if any settings are duplicated the last to appear in the file will be used. diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh index 75b702c700..0186e36aee 100644 --- a/extras.d/80-tempest.sh +++ b/extras.d/80-tempest.sh @@ -14,6 +14,9 @@ if is_service_enabled tempest; then echo_summary "Initializing Tempest" configure_tempest init_tempest + elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then + # local.conf Tempest option overrides + : fi if [[ "$1" == "unstack" ]]; then diff --git a/extras.d/README.md b/extras.d/README.md index 88e4265ced..1dd17da2d6 100644 --- a/extras.d/README.md +++ b/extras.d/README.md @@ -19,10 +19,10 @@ sourced with one or more arguments, the first of which defines the hook phase: source: always called first in any of the scripts, used to set the initial defaults in a lib/* script or similar - stack: called by stack.sh. There are three possible values for + stack: called by stack.sh. 
There are four possible values for the second arg to distinguish the phase stack.sh is in: - arg 2: install | post-config | extra + arg 2: install | post-config | extra | post-extra unstack: called by unstack.sh diff --git a/stack.sh b/stack.sh index 47d93bd642..c24257d815 100755 --- a/stack.sh +++ b/stack.sh @@ -1252,6 +1252,13 @@ if [[ -d $TOP_DIR/extras.d ]]; then done fi +# Local Configuration +# =================== + +# Apply configuration from local.conf if it exists for layer 2 services +# Phase: post-extra +merge_config_group $TOP_DIR/local.conf post-extra + # Run local script # ================ From add4ca3ef0f916e31a781e118c8c4d04a9bec5cf Mon Sep 17 00:00:00 2001 From: Ben Nemec Date: Fri, 8 Nov 2013 17:22:51 +0000 Subject: [PATCH 0240/4438] Fix FROM_PACKAGE checks for novnc and spice The logic for installing novnc and spice from packages is broken, which makes it impossible to install from git, which makes bug 1248923 more serious. Change-Id: I9ae722a5470a16555bca9018da342485f6d3e896 Related-Bug: 1248923 --- lib/nova | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index eacd41599d..b9fa3e1b9c 100644 --- a/lib/nova +++ b/lib/nova @@ -589,7 +589,8 @@ function install_nova() { if is_service_enabled n-novnc; then # a websockets/html5 or flash powered VNC console for vm instances - if trueorfalse True "$NOVNC_FROM_PACKAGE"; then + NOVNC_FROM_PACKAGE=`trueorfalse True $NOVNC_FROM_PACKAGE` + if [ "$NOVNC_FROM_PACKAGE" = "True" ]; then NOVNC_WEB_DIR=/usr/share/novnc install_package novnc else @@ -600,7 +601,8 @@ function install_nova() { if is_service_enabled n-spice; then # a websockets/html5 or flash powered SPICE console for vm instances - if trueorfalse True "$SPICE_FROM_PACKAGE"; then + SPICE_FROM_PACKAGE=`trueorfalse True $SPICE_FROM_PACKAGE` + if [ "$SPICE_FROM_PACKAGE" = "True" ]; then SPICE_WEB_DIR=/usr/share/spice-html5 install_package spice-html5 else From 645171c5fdfa1d43441c411c00c8080218cb6bbe Mon Sep 17 
00:00:00 2001 From: Ilya Kharin Date: Tue, 12 Nov 2013 12:44:20 +0400 Subject: [PATCH 0241/4438] Don't install novnc from package by default By default the installation is failed because the novnc package has got the a dependency of the nova-common package. The last package provides "/etc/nova/*" files with appropriate privileges. It potentially brings to the problem of an access to those files. Change-Id: I1689bef817365b10bf972dc4a8033892ad5939d1 Related-Bug: 1248923 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index b9fa3e1b9c..5b6f50e9ec 100644 --- a/lib/nova +++ b/lib/nova @@ -589,7 +589,7 @@ function install_nova() { if is_service_enabled n-novnc; then # a websockets/html5 or flash powered VNC console for vm instances - NOVNC_FROM_PACKAGE=`trueorfalse True $NOVNC_FROM_PACKAGE` + NOVNC_FROM_PACKAGE=`trueorfalse False $NOVNC_FROM_PACKAGE` if [ "$NOVNC_FROM_PACKAGE" = "True" ]; then NOVNC_WEB_DIR=/usr/share/novnc install_package novnc From 4df4a15a7213f24c99506269acb0129689be56a8 Mon Sep 17 00:00:00 2001 From: Roman Bogorodskiy Date: Tue, 12 Nov 2013 12:09:40 +0000 Subject: [PATCH 0242/4438] Fix typo in config section name in README.md s/locarc/localrc/ Change-Id: Ia5cdfa8e6995e5cad80750372faa35927d4d8e48 --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 640fab65f9..93d139621f 100644 --- a/README.md +++ b/README.md @@ -82,7 +82,7 @@ for example). # Customizing You can override environment variables used in `stack.sh` by creating file -name `local.conf` with a ``locarc`` section as shown below. It is likely +name `local.conf` with a ``localrc`` section as shown below. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host. 
From 261852ddfca72882e3e1f8a0bd3011465b1e70ac Mon Sep 17 00:00:00 2001 From: Noorul Islam K M Date: Tue, 12 Nov 2013 20:24:57 +0530 Subject: [PATCH 0243/4438] Add pip folder to ignore list Change-Id: I26da80b9e09774b9940d382a32cf69e75c61a527 --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 0c22c6b62a..380eacfb5b 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,7 @@ localrc local.sh files/*.gz files/images +files/pip-* stack-screenrc *.pem accrc From 4debfe2b2da5011a93b44d09283b8dfdaf40c0bc Mon Sep 17 00:00:00 2001 From: John Griffith Date: Fri, 1 Nov 2013 00:00:40 +0000 Subject: [PATCH 0244/4438] Add driver_cert wrapper for cinder This adds a simple wrapper to call tempest volume tests. The idea is to make it easy to execute and capture results from tempest.api.volume.test_* Concept is for drivers in Cinder to configure cinder.conf as needed and then run this script which will restart services and kick off the tempest tests, and capture the output to a logfile for submission. To run, 1. deploy devstack as normal with tempest included in enabled_services 2. modify cinder.conf appropriately for your driver 3. 
execute the script devstack/driver_certs/cinder_driver_cert.sh Change-Id: I98ec9e1e418a8416406db5e2e6ffd21992e392cf --- driver_certs/cinder_driver_cert.sh | 87 ++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100755 driver_certs/cinder_driver_cert.sh diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh new file mode 100755 index 0000000000..18bef8b3b5 --- /dev/null +++ b/driver_certs/cinder_driver_cert.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash + +# **cinder_cert.sh** + +CERT_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $CERT_DIR/..; pwd) + +source $TOP_DIR/functions +source $TOP_DIR/stackrc +source $TOP_DIR/openrc +source $TOP_DIR/lib/tempest +source $TOP_DIR/lib/cinder + +TEMPFILE=`mktemp` +RECLONE=True + +function log_message() { + MESSAGE=$1 + STEP_HEADER=$2 + if [[ "$STEP_HEADER" = "True" ]]; then + echo -e "\n========================================================" | tee -a $TEMPFILE + fi + echo -e `date +%m/%d/%y/%T:`"${MESSAGE}" | tee -a $TEMPFILE + if [[ "$STEP_HEADER" = "True" ]]; then + echo -e "========================================================" | tee -a $TEMPFILE + fi +} + +if [[ "$OFFLINE" = "True" ]]; then + echo "ERROR: Driver cert requires fresh clone/pull from ${CINDER_BRANCH}" + echo " Please set OFFLINE=False and retry." + exit 1 +fi + +log_message "RUNNING CINDER DRIVER CERTIFICATION CHECK", True +log_message "Output is being logged to: $TEMPFILE" + +cd $CINDER_DIR +log_message "Cloning to ${CINDER_REPO}...", True +install_cinder + +log_message "Pull a fresh Clone of cinder repo...", True +git status | tee -a $TEMPFILE +git log --pretty=oneline -n 1 | tee -a $TEMPFILE + +log_message "Gathering copy of cinder.conf file (passwords will be scrubbed)...", True +cat /etc/cinder/cinder.conf | egrep -v "(^#.*|^$)" | tee -a $TEMPFILE +sed -i "s/\(.*password.*=\).*$/\1 xxx/i" $TEMPFILE +log_message "End of cinder.conf.", True + +cd $TOP_DIR +# Verify tempest is installed/enabled +if ! 
is_service_enabled tempest; then + log_message "ERROR!!! Cert requires tempest in enabled_services!", True + log_message" Please add tempest to enabled_services and retry." + exit 1 +fi + +cd $TEMPEST_DIR +install_tempest + +log_message "Verify tempest is current....", True +git status | tee -a $TEMPFILE +log_message "Check status and get latest commit..." +git log --pretty=oneline -n 1 | tee -a $TEMPFILE + + +#stop and restart cinder services +log_message "Restart Cinder services...", True +stop_cinder +sleep 1 +start_cinder +sleep 5 + +# run tempest api/volume/test_* +log_message "Run the actual tempest volume tests (run_tests.sh -N tempest.api.volume.test_*)...", True +exec 2> >(tee -a $TEMPFILE) +`./run_tests.sh -N tempest.api.volume.test_*` +if [[ $? = 0 ]]; then + log_message "CONGRATULATIONS!!! Device driver PASSED!", True + log_message "Submit output: ($TEMPFILE)" + exit 0 +else + log_message "SORRY!!! Device driver FAILED!", True + log_message "Check output in $TEMPFILE" + exit 1 +fi From 5ea53ee5f710076eba80ee9677afd3769fd2f36a Mon Sep 17 00:00:00 2001 From: Arnaud Legendre Date: Fri, 1 Nov 2013 16:42:54 -0700 Subject: [PATCH 0245/4438] Use vmdk descriptor to populate image properties image_upload.sh doesn't use the descriptor properties embedded inside the vmdk file. This requires the user to manually change the filename of the vmdk file to add the properties (disk type, storage adapter and network adapter). In case of a sparse monolithic sparse or stream-optimized sparse, these properties are extracted from the descriptor. The user can still override these values by modifying the filename. 
Change-Id: I1734311c66efe60a1a30e3ea63cc2a9da9cdb5b4 Closes-Bug: #1247300 --- functions | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/functions b/functions index 83826f9327..bec76b77ed 100644 --- a/functions +++ b/functions @@ -1320,18 +1320,42 @@ function upload_image() { # Before we can upload vmdk type images to glance, we need to know it's # disk type, storage adapter, and networking adapter. These values are - # passed to glance as custom properties. We take these values from the + # passed to glance as custom properties. + # We take these values from the vmdk file if populated. Otherwise, we use # vmdk filename, which is expected in the following format: # # -:: # # If the filename does not follow the above format then the vsphere # driver will supply default values. + + # vmdk adapter type + vmdk_adapter_type="$(head -25 $IMAGE | grep -a -F -m 1 'ddb.adapterType =' $IMAGE)" + vmdk_adapter_type="${vmdk_adapter_type#*\"}" + vmdk_adapter_type="${vmdk_adapter_type%?}" + + # vmdk disk type + vmdk_create_type="$(head -25 $IMAGE | grep -a -F -m 1 'createType=' $IMAGE)" + vmdk_create_type="${vmdk_create_type#*\"}" + vmdk_create_type="${vmdk_create_type%?}" + if [[ "$vmdk_create_type" = "monolithicSparse" ]]; then + vmdk_disktype="sparse" + elif [[ "$vmdk_create_type" = "monolithicFlat" ]]; then + die $LINENO "Monolithic flat disks should use a descriptor-data pair." \ + "Please provide the disk and not the descriptor." + else + #TODO(alegendre): handle streamOptimized once supported by VMware driver. + vmdk_disktype="preallocated" + fi property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).+:.+:.+$'` if [[ ! -z "$property_string" ]]; then IFS=':' read -a props <<< "$property_string" - vmdk_disktype="${props[0]}" - vmdk_adapter_type="${props[1]}" + if [[ ! -z "${props[0]}" ]]; then + vmdk_disktype="${props[0]}" + fi + if [[ ! 
-z "${props[1]}" ]]; then + vmdk_adapter_type="${props[1]}" + fi vmdk_net_adapter="${props[2]}" fi From 66c54249805c9a6e863c81b754f4abae71aa1b2b Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Tue, 12 Nov 2013 16:24:14 -0800 Subject: [PATCH 0246/4438] Bump SWIFT_LOOPBACK_DISK_SIZE_DEFAULT over swift max_file_size Swift is returning 50x error codes because its disk is too small, set size bigger then max_file_size in an attempt to fix the problem, or at least reduce it. "we create a 4GB device, but swift thinks it can write 5GB, hence fail" --sdague This patch based off of Iccd6368e4df71abb5ccfe7d361c64d86e1071d35 Change-Id: Ib56a98cd74e7edf1fa90facc25c72632d43180f1 Related-Bug: #1225664 --- lib/swift | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index b46537f5a3..83c4ebb49d 100644 --- a/lib/swift +++ b/lib/swift @@ -59,9 +59,9 @@ fi # kilobytes. # Default is 1 gigabyte. SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=1G -# if tempest enabled the default size is 4 Gigabyte. +# if tempest enabled the default size is 6 Gigabyte. 
if is_service_enabled tempest; then - SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=${SWIFT_LOOPBACK_DISK_SIZE:-4G} + SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=${SWIFT_LOOPBACK_DISK_SIZE:-6G} fi SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_DEFAULT} From fc1b7786eb68f8df254804590f2809c0e342a3ab Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 23 Oct 2013 06:46:43 +0000 Subject: [PATCH 0247/4438] cm-cpu-agent only support libvirt driver ceilometer-compute-agent only support libvirt driver Change-Id: I8b92ef10f52388ead11f8ce51c9ab119f953efae --- lib/ceilometer | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ceilometer b/lib/ceilometer index f95ed302ce..dcadb07899 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -137,7 +137,9 @@ function install_ceilometerclient() { # start_ceilometer() - Start running processes, including screen function start_ceilometer() { - screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" + fi screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF" screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF" screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" From 8ceb794c65742c573ca555ff6b8c9cd470a52304 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 23 Oct 2013 09:26:25 +0200 Subject: [PATCH 0248/4438] Allow users to configure the CM pipeline interval The patch allows users to configure the ceilometer pipeline interval. In localrc, we can add CEILOMETER_PIPELINE_INTERVAL=10 to handle the pipeline each 10 seconds instead of the default 10 minutes. 
Change-Id: Ic5216adbdfd70ade38912871ac6df3be732bf780 --- lib/ceilometer | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ceilometer b/lib/ceilometer index dcadb07899..e626427777 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -82,6 +82,10 @@ function configure_ceilometer() { cp $CEILOMETER_DIR/etc/ceilometer/pipeline.yaml $CEILOMETER_CONF_DIR iniset $CEILOMETER_CONF DEFAULT policy_file $CEILOMETER_CONF_DIR/policy.json + if [ "$CEILOMETER_PIPELINE_INTERVAL" ]; then + sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/pipeline.yaml + fi + # the compute and central agents need these credentials in order to # call out to the public nova and glance APIs iniset $CEILOMETER_CONF DEFAULT os_username ceilometer From e231438bf62adb9014e644a443c2165a89812fd3 Mon Sep 17 00:00:00 2001 From: Angus Salkeld Date: Thu, 14 Nov 2013 14:36:46 +1100 Subject: [PATCH 0249/4438] Use the oslo db database connection for Heat This is the preferred configuration now, and sql_connection is deprecated. 
Change-Id: I2d8342b5025ab481e1db0521a3e0610b73bda9de --- lib/heat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index 8f123ea212..7a9ef0da26 100644 --- a/lib/heat +++ b/lib/heat @@ -80,7 +80,7 @@ function configure_heat() { iniset $HEAT_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT iniset $HEAT_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition iniset $HEAT_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT - iniset $HEAT_CONF DEFAULT sql_connection `database_connection_url heat` + iniset $HEAT_CONF database connection `database_connection_url heat` iniset $HEAT_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/random` # logging From d0059595529883719726ec146534a6639dbae65e Mon Sep 17 00:00:00 2001 From: Roman Prykhodchenko Date: Thu, 14 Nov 2013 09:58:53 +0200 Subject: [PATCH 0250/4438] Enable/disable ironic in tempest config In order to run tempest tests for Ironic in devstack the availability of the Ironic service must be set in the tempest config. This patch adds a shortcut for Ironic services and sets availability of Ironic in tempest config. 
Change-Id: I206fc2ea13412ceb128f8bfe90c153348d6f2f3e --- functions | 1 + lib/tempest | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/functions b/functions index afb75cce9c..9c65cd1209 100644 --- a/functions +++ b/functions @@ -841,6 +841,7 @@ function is_service_enabled() { [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0 [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0 [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 + [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && return 0 [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0 [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0 diff --git a/lib/tempest b/lib/tempest index ec1fc90b76..fca3884e7f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -300,7 +300,7 @@ function configure_tempest() { iniset $TEMPEST_CONF cli cli_dir $NOVA_BIN_DIR # service_available - for service in nova cinder glance neutron swift heat horizon ceilometer; do + for service in nova cinder glance neutron swift heat horizon ceilometer ironic; do if is_service_enabled $service ; then iniset $TEMPEST_CONF service_available $service "True" else From 394c11c72191fff6eed1543600260b9de7f55676 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A9ri=20Le=20Bouder?= Date: Tue, 5 Nov 2013 10:35:55 +0100 Subject: [PATCH 0251/4438] horizon: drop nodejs dependency since commit a0739c9423a4c559b45af96fa4cdb30539dcdbd7, horizon use a pure Python alternative to nodejs Change-Id: I231b453e42c303c3cc29e8bea4d7b935fecdccd2 --- files/apts/horizon | 2 -- files/rpms-suse/horizon | 1 - files/rpms/horizon | 1 - lib/horizon | 9 --------- tools/fixup_stuff.sh | 3 +-- 5 files changed, 1 insertion(+), 15 deletions(-) diff --git a/files/apts/horizon b/files/apts/horizon index 0865931d44..8969046355 100644 --- a/files/apts/horizon +++ b/files/apts/horizon 
@@ -19,5 +19,3 @@ python-kombu python-coverage python-cherrypy3 # why? python-migrate -nodejs -nodejs-legacy # dist:quantal diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon index 73932acc1d..d3bde2690c 100644 --- a/files/rpms-suse/horizon +++ b/files/rpms-suse/horizon @@ -1,6 +1,5 @@ apache2 # NOPRIME apache2-mod_wsgi # NOPRIME -nodejs python-CherryPy # why? (coming from apts) python-Paste python-PasteDeploy diff --git a/files/rpms/horizon b/files/rpms/horizon index 0ca18cadb7..aa27ab4e97 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -3,7 +3,6 @@ django-registration gcc httpd # NOPRIME mod_wsgi # NOPRIME -nodejs # NOPRIME pylint python-anyjson python-BeautifulSoup diff --git a/lib/horizon b/lib/horizon index 4cb2828f10..5bff712743 100644 --- a/lib/horizon +++ b/lib/horizon @@ -153,15 +153,6 @@ function install_horizon() { # Apache installation, because we mark it NOPRIME install_apache_wsgi - # NOTE(sdague) quantal changed the name of the node binary - if is_ubuntu; then - if [[ ! 
-e "/usr/bin/node" ]]; then - install_package nodejs-legacy - fi - elif is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -ge "18" ]]; then - install_package nodejs - fi - git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG } diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 325a6d6be1..f9362307d8 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -76,8 +76,7 @@ fi if [[ $DISTRO =~ (rhel6) ]]; then # Disable selinux to avoid configuring to allow Apache access - # to Horizon files or run nodejs (LP#1175444) - # FIXME(dtroyer): see if this can be skipped without node or if Horizon is not enabled + # to Horizon files (LP#1175444) if selinuxenabled; then sudo setenforce 0 fi From 2b8814d0ecbca897f4bcfdf1117e773bc4b45e77 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 25 Sep 2013 17:07:06 +0100 Subject: [PATCH 0252/4438] xenapi: enable user to specify FLAT_NETWORK_BRIDGE install_os_domU.sh failed, if the FLAT_NETWORK_BRIDGE is found in localrc. As nova looks up the network by either name-label or bridge name, it makes sense to enable the user to specify this parameter. As an example, if the user wants to use name-labels to specify networks, and those name-labels could be used in domU to create bridges: VM_BRIDGE_OR_NET_NAME="osvmnet" FLAT_NETWORK_BRIDGE="osvmnet" In this case, the domU will know only about a name label, so it could be decoupled from which xapi bridges used. This change also adds some fixes (missing double quotes). Change-Id: I045e367ef441be20c4e8cb8af3c1149392db796b --- tools/xen/functions | 4 ++-- tools/xen/install_os_domU.sh | 15 +++++++++------ 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/tools/xen/functions b/tools/xen/functions index b0b077d8d1..563303da21 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -137,14 +137,14 @@ function _network_exists() { local name_label name_label=$1 - ! [ -z $(xe network-list name-label="$name_label" --minimal) ] + ! 
[ -z "$(xe network-list name-label="$name_label" --minimal)" ] } function _bridge_exists() { local bridge bridge=$1 - ! [ -z $(xe network-list bridge="$bridge" --minimal) ] + ! [ -z "$(xe network-list bridge="$bridge" --minimal)" ] } function _network_uuid() { diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 33dc26f1bb..6ce334bc00 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -111,12 +111,15 @@ if is_service_enabled neutron; then fi if parameter_is_specified "FLAT_NETWORK_BRIDGE"; then - cat >&2 << EOF -ERROR: FLAT_NETWORK_BRIDGE is specified in localrc file -This is considered as an error, as its value will be derived from the -VM_BRIDGE_OR_NET_NAME variable's value. + if [ "$(bridge_for "$VM_BRIDGE_OR_NET_NAME")" != "$(bridge_for "$FLAT_NETWORK_BRIDGE")" ]; then + cat >&2 << EOF +ERROR: FLAT_NETWORK_BRIDGE is specified in localrc file, and either no network +found on XenServer by searching for networks by that value as name-label or +bridge name or the network found does not match the network specified by +VM_BRIDGE_OR_NET_NAME. Please check your localrc file. EOF - exit 1 + exit 1 + fi fi if ! 
xenapi_is_listening_on "$MGT_BRIDGE_OR_NET_NAME"; then @@ -310,7 +313,7 @@ if is_service_enabled neutron; then "xen_integration_bridge=${XEN_INTEGRATION_BRIDGE}" fi -FLAT_NETWORK_BRIDGE=$(bridge_for "$VM_BRIDGE_OR_NET_NAME") +FLAT_NETWORK_BRIDGE="${FLAT_NETWORK_BRIDGE:-$(bridge_for "$VM_BRIDGE_OR_NET_NAME")}" append_kernel_cmdline "$GUEST_NAME" "flat_network_bridge=${FLAT_NETWORK_BRIDGE}" # Add a separate xvdb, if it was requested From 61ae7c166c59f6dae28e9f9437cfe4468c600808 Mon Sep 17 00:00:00 2001 From: Peter Feiner Date: Fri, 15 Nov 2013 10:42:30 -0500 Subject: [PATCH 0253/4438] support memcache for keystone token backend Change-Id: I0c85a64932c39264b73cff4f9d952d0dbdf49e5b --- lib/keystone | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/lib/keystone b/lib/keystone index 4353ebab1c..0521bd3eac 100644 --- a/lib/keystone +++ b/lib/keystone @@ -190,6 +190,8 @@ function configure_keystone() { if [[ "$KEYSTONE_TOKEN_BACKEND" = "sql" ]]; then iniset $KEYSTONE_CONF token driver keystone.token.backends.sql.Token + elif [[ "$KEYSTONE_TOKEN_BACKEND" = "memcache" ]]; then + iniset $KEYSTONE_CONF token driver keystone.token.backends.memcache.Token else iniset $KEYSTONE_CONF token driver keystone.token.backends.kvs.Token fi @@ -350,6 +352,17 @@ function install_keystone() { if is_service_enabled ldap; then install_ldap fi + if [[ "$KEYSTONE_TOKEN_BACKEND" = "memcache" ]]; then + # Install memcached and the memcache Python library that keystone uses. + # Unfortunately the Python library goes by different names in the .deb + # and .rpm circles. 
+ install_package memcached + if is_ubuntu; then + install_package python-memcache + else + install_package python-memcached + fi + fi git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH setup_develop $KEYSTONE_DIR if is_apache_enabled_service key; then From 3e439448b5ab1ea1cf2bfaef5d08c6ce41819912 Mon Sep 17 00:00:00 2001 From: Arnaud Legendre Date: Fri, 15 Nov 2013 16:06:03 -0800 Subject: [PATCH 0254/4438] upload_image.sh should handle file URLs upload_image.sh doesn't handle correctly file URLs: a file URL works only if the file is already in the cache. This patch provides support for file URLs of local files (RFC 1738) http://tools.ietf.org/html/rfc1738 Change-Id: I107299c543cfa189e32848c32eefdbeb51a5e1f5 Closes-Bug: #1251752 --- functions | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/functions b/functions index effdc53afb..a9363f8c3e 100644 --- a/functions +++ b/functions @@ -1337,11 +1337,24 @@ function upload_image() { # Create a directory for the downloaded image tarballs. mkdir -p $FILES/images - # Downloads the image (uec ami+aki style), then extracts it. - IMAGE_FNAME=`basename "$image_url"` - if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then - wget -c $image_url -O $FILES/$IMAGE_FNAME - if [[ $? -ne 0 ]]; then + if [[ $image_url != file* ]]; then + # Downloads the image (uec ami+aki style), then extracts it. + IMAGE_FNAME=`basename "$image_url"` + if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then + wget -c $image_url -O $FILES/$IMAGE_FNAME + if [[ $? -ne 0 ]]; then + echo "Not found: $image_url" + return + fi + fi + IMAGE="$FILES/${IMAGE_FNAME}" + else + # File based URL (RFC 1738): file://host/path + # Remote files are not considered here. + # *nix: file:///home/user/path/file + # windows: file:///C:/Documents%20and%20Settings/user/path/file + IMAGE=$(echo $image_url | sed "s/^file:\/\///g") + if [[ ! 
-f $IMAGE || "$(stat -c "%s" $IMAGE)" == "0" ]]; then echo "Not found: $image_url" return fi @@ -1349,7 +1362,6 @@ function upload_image() { # OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading if [[ "$image_url" =~ 'openvz' ]]; then - IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format ami --disk-format ami < "${IMAGE}" return @@ -1357,7 +1369,6 @@ function upload_image() { # vmdk format images if [[ "$image_url" =~ '.vmdk' ]]; then - IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME="${IMAGE_FNAME%.vmdk}" # Before we can upload vmdk type images to glance, we need to know it's @@ -1408,7 +1419,6 @@ function upload_image() { # XenServer-vhd-ovf-format images are provided as .vhd.tgz # and should not be decompressed prior to loading if [[ "$image_url" =~ '.vhd.tgz' ]]; then - IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME="${IMAGE_FNAME%.vhd.tgz}" glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format=ovf --disk-format=vhd < "${IMAGE}" return @@ -1418,7 +1428,6 @@ function upload_image() { # and should not be decompressed prior to loading. # Setting metadata, so PV mode is used. 
if [[ "$image_url" =~ '.xen-raw.tgz' ]]; then - IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME="${IMAGE_FNAME%.xen-raw.tgz}" glance \ --os-auth-token $token \ @@ -1456,7 +1465,6 @@ function upload_image() { fi ;; *.img) - IMAGE="$FILES/$IMAGE_FNAME"; IMAGE_NAME=$(basename "$IMAGE" ".img") format=$(qemu-img info ${IMAGE} | awk '/^file format/ { print $3; exit }') if [[ ",qcow2,raw,vdi,vmdk,vpc," =~ ",$format," ]]; then @@ -1467,20 +1475,17 @@ function upload_image() { CONTAINER_FORMAT=bare ;; *.img.gz) - IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME=$(basename "$IMAGE" ".img.gz") DISK_FORMAT=raw CONTAINER_FORMAT=bare UNPACK=zcat ;; *.qcow2) - IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME=$(basename "$IMAGE" ".qcow2") DISK_FORMAT=qcow2 CONTAINER_FORMAT=bare ;; *.iso) - IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME=$(basename "$IMAGE" ".iso") DISK_FORMAT=iso CONTAINER_FORMAT=bare From 047cac56e14552eb6b2d374a35f3a092c5f2a5d4 Mon Sep 17 00:00:00 2001 From: Steve Kowalik Date: Thu, 7 Nov 2013 22:36:10 +1100 Subject: [PATCH 0255/4438] Switch the base URL to git.openstack.org The git repositories on github for openstack are mirrors of the primary repositories, which are hosted on git.openstack.org, so switch as much as I can to using the primary, rather than the github mirror. Change-Id: Idcfda49a691582055256b830c61e098f4a271339 --- README.md | 2 +- stackrc | 4 ++-- tools/build_tempest.sh | 2 +- tools/xen/test_functions.sh | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 93d139621f..b2603e75fc 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ You can also pick specific OpenStack project releases by setting the appropriate `stackrc` for the default set). 
Usually just before a release there will be milestone-proposed branches that need to be tested:: - GLANCE_REPO=https://github.com/openstack/glance.git + GLANCE_REPO=git://git.openstack.org/openstack/glance.git GLANCE_BRANCH=milestone-proposed # Start A Dev Cloud diff --git a/stackrc b/stackrc index 6adb676866..7eda5a5671 100644 --- a/stackrc +++ b/stackrc @@ -62,7 +62,7 @@ fi # Base GIT Repo URL # Another option is http://review.openstack.org/p -GIT_BASE=${GIT_BASE:-https://github.com} +GIT_BASE=${GIT_BASE:-git://git.openstack.org} # metering service CEILOMETER_REPO=${CEILOMETER_REPO:-${GIT_BASE}/openstack/ceilometer.git} @@ -182,7 +182,7 @@ NOVNC_REPO=${NOVNC_REPO:-${GIT_BASE}/kanaka/noVNC.git} NOVNC_BRANCH=${NOVNC_BRANCH:-master} # ryu service -RYU_REPO=${RYU_REPO:-${GIT_BASE}/osrg/ryu.git} +RYU_REPO=${RYU_REPO:-https://github.com/osrg/ryu.git} RYU_BRANCH=${RYU_BRANCH:-master} # a websockets/html5 or flash powered SPICE console for vm instances diff --git a/tools/build_tempest.sh b/tools/build_tempest.sh index 1758e7da33..6c527f5962 100755 --- a/tools/build_tempest.sh +++ b/tools/build_tempest.sh @@ -2,7 +2,7 @@ # # **build_tempest.sh** -# Checkout and prepare a Tempest repo: https://github.com/openstack/tempest.git +# Checkout and prepare a Tempest repo: git://git.openstack.org/openstack/tempest.git function usage { echo "$0 - Check out and prepare a Tempest repo" diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh index 534723833d..0ae2cb7f9a 100755 --- a/tools/xen/test_functions.sh +++ b/tools/xen/test_functions.sh @@ -111,8 +111,8 @@ function test_no_plugin_directory_found { function test_zip_snapshot_location { diff \ - <(zip_snapshot_location "https://github.com/openstack/nova.git" "master") \ - <(echo "https://github.com/openstack/nova/zipball/master") + <(zip_snapshot_location "git://git.openstack.org/openstack/nova.git" "master") \ + <(echo "git://git.openstack.org/openstack/nova/zipball/master") } function 
test_create_directory_for_kernels { From f653419b15d244fa7e01a101de93174d282185ed Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Sun, 17 Nov 2013 13:03:52 -0600 Subject: [PATCH 0256/4438] Keystone don't use deprecated token_format option devstack was setting the token_format option in the keystone configuration file. This option is deprecated so should not be used. Change-Id: I047de155f0d9d2a1c009533c2f97f505cc80c6de --- lib/keystone | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 4353ebab1c..978577f55e 100644 --- a/lib/keystone +++ b/lib/keystone @@ -179,7 +179,6 @@ function configure_keystone() { fi iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN" - iniset $KEYSTONE_CONF signing token_format "$KEYSTONE_TOKEN_FORMAT" if [[ "$KEYSTONE_TOKEN_FORMAT" = "UUID" ]]; then iniset $KEYSTONE_CONF token provider keystone.token.providers.uuid.Provider From ca1b85283b2d53e5e6e52a90a57a9310dd948d5c Mon Sep 17 00:00:00 2001 From: DennyZhang Date: Sun, 17 Nov 2013 15:44:32 -0600 Subject: [PATCH 0257/4438] Mute false alarm when installing docker In install_docker.sh, we will restart docker service, then connect to /var/run/docker.sock with retry mechanism. At the first contacting with /var/run/docker.sock, when docker service is not ready, it may complain some error. Mute this false alarm. Change-Id: If00a18d2e3ddee951662e272d47ae84215f16ad2 Closes-Bug: #1252087 --- tools/docker/install_docker.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh index 2e5b510c41..375cfe958b 100755 --- a/tools/docker/install_docker.sh +++ b/tools/docker/install_docker.sh @@ -45,7 +45,7 @@ restart_service docker echo "Waiting for docker daemon to start..." DOCKER_GROUP=$(groups | cut -d' ' -f1) -CONFIGURE_CMD="while ! /bin/echo -e 'GET /v1.3/version HTTP/1.0\n\n' | socat - unix-connect:$DOCKER_UNIX_SOCKET | grep -q '200 OK'; do +CONFIGURE_CMD="while ! 
/bin/echo -e 'GET /v1.3/version HTTP/1.0\n\n' | socat - unix-connect:$DOCKER_UNIX_SOCKET 2>/dev/null | grep -q '200 OK'; do # Set the right group on docker unix socket before retrying sudo chgrp $DOCKER_GROUP $DOCKER_UNIX_SOCKET sudo chmod g+rw $DOCKER_UNIX_SOCKET From aaac4eede998e6601c879fd359e0cb91c83ba77a Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Mon, 18 Nov 2013 22:12:46 +0000 Subject: [PATCH 0258/4438] Fix stackforge_libs installation step When stackforge_libs is enabled, the WSME and Pecan libraries are checked out from stackforge and installed from source instead of pip. This change introduces a new function to perform the installation without attempting to sync the global requirements list, since the version of setup.py in the global requirements repository breaks the dependencies for WSME (there is no ipaddr library in python 2, so we need to install it, but under python 3 where it is part of the stdlib we cannot include it in the requirements). Fixes bug 1252488 Change-Id: I58357757ac67a919bf70178b76f65fa0a9e16242 --- functions | 22 ++++++++++++++++++---- lib/stackforge | 4 ++-- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/functions b/functions index effdc53afb..ebccb592b7 100644 --- a/functions +++ b/functions @@ -1250,7 +1250,11 @@ function safe_chmod() { # ``pip install -e`` the package, which processes the dependencies # using pip before running `setup.py develop` -# Uses globals ``STACK_USER``, ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR`` +# +# Updates the dependencies in project_dir from the +# openstack/requirements global list before installing anything. 
+# +# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR`` # setup_develop directory function setup_develop() { local project_dir=$1 @@ -1266,9 +1270,7 @@ function setup_develop() { $SUDO_CMD python update.py $project_dir) fi - pip_install -e $project_dir - # ensure that further actions can do things like setup.py sdist - safe_chown -R $STACK_USER $1/*.egg-info + setup_develop_no_requirements_update $project_dir # We've just gone and possibly modified the user's source tree in an # automated way, which is considered bad form if it's a development @@ -1285,6 +1287,18 @@ function setup_develop() { fi } +# ``pip install -e`` the package, which processes the dependencies +# using pip before running `setup.py develop` +# Uses globals ``STACK_USER`` +# setup_develop_no_requirements_update directory +function setup_develop_no_requirements_update() { + local project_dir=$1 + + pip_install -e $project_dir + # ensure that further actions can do things like setup.py sdist + safe_chown -R $STACK_USER $1/*.egg-info +} + # Service wrapper to start services # start_service service-name diff --git a/lib/stackforge b/lib/stackforge index 4b79de0c94..718b818ff6 100644 --- a/lib/stackforge +++ b/lib/stackforge @@ -39,10 +39,10 @@ function install_stackforge() { cleanup_stackforge git_clone $WSME_REPO $WSME_DIR $WSME_BRANCH - setup_develop $WSME_DIR + setup_develop_no_requirements_update $WSME_DIR git_clone $PECAN_REPO $PECAN_DIR $PECAN_BRANCH - setup_develop $PECAN_DIR + setup_develop_no_requirements_update $PECAN_DIR } # cleanup_stackforge() - purge possibly old versions of stackforge libraries From e578effb330f6ee0adf3b2b19fba1a7f5c64b2f0 Mon Sep 17 00:00:00 2001 From: Stephan Renatus Date: Tue, 19 Nov 2013 13:31:04 +0100 Subject: [PATCH 0259/4438] Make use of STACK_USER instead of relying on USER Quite easily one ends up calling ./stack.sh in an environment that, albeit being user "stack" (for example), doesn't quite meet the expectations of devstack. 
The errors that follow can be rather hard to track down, as the dependency on `USER` is not mentioned. To remedy this situation, this commit - uses STACK_USER instead of USER and - mentions that dependency in the script headers of lib/* Change-Id: If4cdc39b922ea64b4c0893a0e695ec06349fccc5 --- lib/apache | 6 +++++- lib/ceilometer | 4 ++-- lib/cinder | 2 +- lib/neutron | 3 ++- lib/nova | 2 +- lib/nova_plugins/hypervisor-libvirt | 3 ++- lib/swift | 22 +++++++++++----------- 7 files changed, 24 insertions(+), 18 deletions(-) diff --git a/lib/apache b/lib/apache index 41d6fcc381..8ae78b2181 100644 --- a/lib/apache +++ b/lib/apache @@ -4,6 +4,10 @@ # Dependencies: # # - ``functions`` file +# -``STACK_USER`` must be defined + +# lib/apache exports the following functions: +# # - is_apache_enabled_service # - install_apache_wsgi # - config_apache_wsgi @@ -19,7 +23,7 @@ set +o xtrace # Allow overriding the default Apache user and group, default to # current user and his default group. -APACHE_USER=${APACHE_USER:-$USER} +APACHE_USER=${APACHE_USER:-$STACK_USER} APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)} diff --git a/lib/ceilometer b/lib/ceilometer index dcadb07899..87bb656642 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -67,10 +67,10 @@ function configure_ceilometer() { setup_develop $CEILOMETER_DIR [ ! -d $CEILOMETER_CONF_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_CONF_DIR - sudo chown $USER $CEILOMETER_CONF_DIR + sudo chown $STACK_USER $CEILOMETER_CONF_DIR [ ! 
-d $CEILOMETER_API_LOG_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_API_LOG_DIR - sudo chown $USER $CEILOMETER_API_LOG_DIR + sudo chown $STACK_USER $CEILOMETER_API_LOG_DIR iniset_rpc_backend ceilometer $CEILOMETER_CONF DEFAULT diff --git a/lib/cinder b/lib/cinder index 20d6e615f6..96d25058ce 100644 --- a/lib/cinder +++ b/lib/cinder @@ -199,7 +199,7 @@ function configure_cinder() { fi TEMPFILE=`mktemp` - echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_CINDER_SUDOER_CMD" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CINDER_SUDOER_CMD" >$TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap diff --git a/lib/neutron b/lib/neutron index 098a589592..c4d9abcadc 100644 --- a/lib/neutron +++ b/lib/neutron @@ -4,6 +4,7 @@ # Dependencies: # ``functions`` file # ``DEST`` must be defined +# ``STACK_USER`` must be defined # ``stack.sh`` calls the entry points in this order: # @@ -730,7 +731,7 @@ function _neutron_setup_rootwrap() { # Set up the rootwrap sudoers for neutron TEMPFILE=`mktemp` - echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap diff --git a/lib/nova b/lib/nova index 5b6f50e9ec..6ab2000111 100644 --- a/lib/nova +++ b/lib/nova @@ -195,7 +195,7 @@ function configure_nova_rootwrap() { # Set up the rootwrap sudoers for nova TEMPFILE=`mktemp` - echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 6fae0b17d0..6f90f4ac17 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -7,6 +7,7 @@ # Dependencies: # 
``functions`` file # ``nova`` configuration +# ``STACK_USER`` has to be defined # install_nova_hypervisor - install any external requirements # configure_nova_hypervisor - make configuration changes, including those to other services @@ -68,7 +69,7 @@ EOF" # with 'unix-group:$group'. sudo bash -c "cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla [libvirt Management Access] -Identity=unix-user:$USER +Identity=unix-user:$STACK_USER Action=org.libvirt.unix.manage ResultAny=yes ResultInactive=yes diff --git a/lib/swift b/lib/swift index 83c4ebb49d..c932ea7907 100644 --- a/lib/swift +++ b/lib/swift @@ -225,7 +225,7 @@ function configure_swift() { swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true sudo mkdir -p ${SWIFT_CONF_DIR}/{object,container,account}-server - sudo chown -R $USER: ${SWIFT_CONF_DIR} + sudo chown -R ${STACK_USER}: ${SWIFT_CONF_DIR} if [[ "$SWIFT_CONF_DIR" != "/etc/swift" ]]; then # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed. @@ -238,7 +238,7 @@ function configure_swift() { # setup) we configure it with our version of rsync. 
sed -e " s/%GROUP%/${USER_GROUP}/; - s/%USER%/$USER/; + s/%USER%/${STACK_USER}/; s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,; " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf # rsyncd.conf just prepared for 4 nodes @@ -252,7 +252,7 @@ function configure_swift() { cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER} iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${USER} + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${STACK_USER} iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONF_DIR} @@ -339,7 +339,7 @@ EOF node_path=${SWIFT_DATA_DIR}/${node_number} iniuncomment ${swift_node_config} DEFAULT user - iniset ${swift_node_config} DEFAULT user ${USER} + iniset ${swift_node_config} DEFAULT user ${STACK_USER} iniuncomment ${swift_node_config} DEFAULT bind_port iniset ${swift_node_config} DEFAULT bind_port ${bind_port} @@ -410,7 +410,7 @@ EOF swift_log_dir=${SWIFT_DATA_DIR}/logs rm -rf ${swift_log_dir} mkdir -p ${swift_log_dir}/hourly - sudo chown -R $USER:adm ${swift_log_dir} + sudo chown -R ${STACK_USER}:adm ${swift_log_dir} sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ tee /etc/rsyslog.d/10-swift.conf if is_apache_enabled_service swift; then @@ -425,9 +425,9 @@ function create_swift_disk() { # First do a bit of setup by creating the directories and # changing the permissions so we can run it as our user. - USER_GROUP=$(id -g) + USER_GROUP=$(id -g ${STACK_USER}) sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache,run,logs} - sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR} + sudo chown -R ${STACK_USER}:${USER_GROUP} ${SWIFT_DATA_DIR} # Create a loopback disk and format it to XFS. 
if [[ -e ${SWIFT_DISK_IMAGE} ]]; then @@ -439,7 +439,7 @@ function create_swift_disk() { mkdir -p ${SWIFT_DATA_DIR}/drives/images sudo touch ${SWIFT_DISK_IMAGE} - sudo chown $USER: ${SWIFT_DISK_IMAGE} + sudo chown ${STACK_USER}: ${SWIFT_DISK_IMAGE} truncate -s ${SWIFT_LOOPBACK_DISK_SIZE} ${SWIFT_DISK_IMAGE} @@ -462,9 +462,9 @@ function create_swift_disk() { node_device=${node}/sdb1 [[ -d $node ]] && continue [[ -d $drive ]] && continue - sudo install -o ${USER} -g $USER_GROUP -d $drive - sudo install -o ${USER} -g $USER_GROUP -d $node_device - sudo chown -R $USER: ${node} + sudo install -o ${STACK_USER} -g $USER_GROUP -d $drive + sudo install -o ${STACK_USER} -g $USER_GROUP -d $node_device + sudo chown -R ${STACK_USER}: ${node} done } # create_swift_accounts() - Set up standard swift accounts and extra From bfb3e5ec9cfb6f06bfc4373e3c795e2918602c8e Mon Sep 17 00:00:00 2001 From: Ryan Hsu Date: Mon, 11 Nov 2013 21:20:14 -0800 Subject: [PATCH 0260/4438] Update vsphere image filename pattern The vsphere image filename pattern has been updated so that semi- colons are used to delimit image properties rather than colons, which are not permitted in Windows filesystems. To support back- wards compatibility, colons can still be used. Change-Id: I29a3ac03dcae294326dc8813a66512a79f705f81 Closes-Bug: #1250319 --- functions | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/functions b/functions index effdc53afb..d3de1427ba 100644 --- a/functions +++ b/functions @@ -1362,15 +1362,19 @@ function upload_image() { # Before we can upload vmdk type images to glance, we need to know it's # disk type, storage adapter, and networking adapter. These values are - # passed to glance as custom properties. + # passed to glance as custom properties. # We take these values from the vmdk file if populated. 
Otherwise, we use # vmdk filename, which is expected in the following format: # - # -:: + # -;; # # If the filename does not follow the above format then the vsphere # driver will supply default values. + vmdk_adapter_type="" + vmdk_disktype="" + vmdk_net_adapter="" + # vmdk adapter type vmdk_adapter_type="$(head -25 $IMAGE | grep -a -F -m 1 'ddb.adapterType =' $IMAGE)" vmdk_adapter_type="${vmdk_adapter_type#*\"}" @@ -1389,17 +1393,15 @@ function upload_image() { #TODO(alegendre): handle streamOptimized once supported by VMware driver. vmdk_disktype="preallocated" fi - property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).+:.+:.+$'` - if [[ ! -z "$property_string" ]]; then - IFS=':' read -a props <<< "$property_string" - if [[ ! -z "${props[0]}" ]]; then - vmdk_disktype="${props[0]}" - fi - if [[ ! -z "${props[1]}" ]]; then - vmdk_adapter_type="${props[1]}" - fi - vmdk_net_adapter="${props[2]}" - fi + + # NOTE: For backwards compatibility reasons, colons may be used in place + # of semi-colons for property delimiters but they are not permitted + # characters in NTFS filesystems. 
+ property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).+[:;].+[:;].+$'` + IFS=':;' read -a props <<< "$property_string" + vmdk_disktype="${props[0]:-$vmdk_disktype}" + vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}" + vmdk_net_adapter="${props[2]:-$vmdk_net_adapter}" glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${IMAGE}" return From 75cb61ba39e17f3e3fb0d8a99b9aecf877e88819 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Wed, 20 Nov 2013 00:19:59 +0400 Subject: [PATCH 0261/4438] Handle Savanna service availability in tempest Change-Id: I51300304655803f114d3bb911086cd88aa09638f --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index fca3884e7f..f3578b17c3 100644 --- a/lib/tempest +++ b/lib/tempest @@ -300,7 +300,7 @@ function configure_tempest() { iniset $TEMPEST_CONF cli cli_dir $NOVA_BIN_DIR # service_available - for service in nova cinder glance neutron swift heat horizon ceilometer ironic; do + for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna; do if is_service_enabled $service ; then iniset $TEMPEST_CONF service_available $service "True" else From 40546f79e0e504d2d1470019a61a24da217e14fc Mon Sep 17 00:00:00 2001 From: Emilien Macchi Date: Tue, 24 Sep 2013 15:10:25 +0200 Subject: [PATCH 0262/4438] Add Neutron Metering Agent support In Havana, Neutron has now a Metering Agent which gets meters from virtual routers. This patchs aims to allow devstack using this new service. 
Change-Id: I17ad83799d60384247b98cc8a93ac032f641c721 Signed-off-by: Emilien Macchi --- README.md | 1 + lib/neutron | 18 ++++++++++++++++ lib/neutron_plugins/services/metering | 30 +++++++++++++++++++++++++++ 3 files changed, 49 insertions(+) create mode 100644 lib/neutron_plugins/services/metering diff --git a/README.md b/README.md index 99e983887e..c94d8bd23a 100644 --- a/README.md +++ b/README.md @@ -139,6 +139,7 @@ following settings in your `localrc` : enable_service q-dhcp enable_service q-l3 enable_service q-meta + enable_service q-metering enable_service neutron # Optional, to enable tempest configuration as part of devstack enable_service tempest diff --git a/lib/neutron b/lib/neutron index 4a3d1b06a6..5334be613b 100644 --- a/lib/neutron +++ b/lib/neutron @@ -202,6 +202,12 @@ source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN # Hardcoding for 1 service plugin for now source $TOP_DIR/lib/neutron_plugins/services/loadbalancer +# Agent metering service plugin functions +# ------------------------------------------- + +# Hardcoding for 1 service plugin for now +source $TOP_DIR/lib/neutron_plugins/services/metering + # VPN service plugin functions # ------------------------------------------- # Hardcoding for 1 service plugin for now @@ -231,6 +237,9 @@ function configure_neutron() { if is_service_enabled q-lbaas; then _configure_neutron_lbaas fi + if is_service_enabled q-metering; then + _configure_neutron_metering + fi if is_service_enabled q-vpn; then _configure_neutron_vpn fi @@ -451,6 +460,10 @@ function start_neutron_agents() { if is_service_enabled q-lbaas; then screen_it q-lbaas "cd $NEUTRON_DIR && python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME" fi + + if is_service_enabled q-metering; then + screen_it q-metering "cd $NEUTRON_DIR && python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" + fi } # stop_neutron() - Stop running processes (non-screen) @@ -630,6 
+643,11 @@ function _configure_neutron_lbaas() { neutron_agent_lbaas_configure_agent } +function _configure_neutron_metering() { + neutron_agent_metering_configure_common + neutron_agent_metering_configure_agent +} + function _configure_neutron_fwaas() { neutron_fwaas_configure_common neutron_fwaas_configure_driver diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering new file mode 100644 index 0000000000..629f3b788a --- /dev/null +++ b/lib/neutron_plugins/services/metering @@ -0,0 +1,30 @@ +# Neutron metering plugin +# --------------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +AGENT_METERING_BINARY="$NEUTRON_BIN_DIR/neutron-metering-agent" +METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin" + +function neutron_agent_metering_configure_common() { + if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then + Q_SERVICE_PLUGIN_CLASSES=$METERING_PLUGIN + else + Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$METERING_PLUGIN" + fi +} + +function neutron_agent_metering_configure_agent() { + METERING_AGENT_CONF_PATH=/etc/neutron/services/metering + mkdir -p $METERING_AGENT_CONF_PATH + + METERING_AGENT_CONF_FILENAME="$METERING_AGENT_CONF_PATH/metering_agent.ini" + + cp $NEUTRON_DIR/etc/metering_agent.ini $METERING_AGENT_CONF_FILENAME +} + +# Restore xtrace +$MY_XTRACE From d254da5213bf0868663b630dbb1ee99fe9157c6f Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Tue, 19 Nov 2013 21:06:29 -0800 Subject: [PATCH 0263/4438] Set swift timeouts higher Devstack is commonly run in a small slow environment, so bump the timeouts up. 
node_timeout is how long between read operations a node takes to respond to the proxy server conn_timeout is all about how long it takes a connect() system call to return Change-Id: Ib437466a3fc9274b8aa49b19e4fe7fa26f553419 Co-Authored-By: Peter Portante Related-Bug: #1252514 --- lib/swift | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/swift b/lib/swift index 83c4ebb49d..927194d8d7 100644 --- a/lib/swift +++ b/lib/swift @@ -266,6 +266,15 @@ function configure_swift() { iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080} + # Devstack is commonly run in a small slow environment, so bump the + # timeouts up. + # node_timeout is how long between read operations a node takes to + # respond to the proxy server + # conn_timeout is all about how long it takes a connect() system call to + # return + iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server node_timeout 120 + iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server conn_timeout 20 + # Configure Ceilometer if is_service_enabled ceilometer; then iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift" From 3b80bde8c7345a5e8c217b6c5c256c2f83aa7900 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Wed, 20 Nov 2013 17:51:50 -0800 Subject: [PATCH 0264/4438] Check if flavors exist before creating them. * lib/tempest: When creating the m1.tiny and m1.nano flavors ensure that they don't exist first. This is important for Grenade where code may be run multiple times and should expect that some things preexist. 
Change-Id: I1772d4334f39d612f8a187eb5311a1b2caee3953 --- lib/tempest | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index ec1fc90b76..cbf6a76610 100644 --- a/lib/tempest +++ b/lib/tempest @@ -73,6 +73,7 @@ function configure_tempest() { local password local line local flavors + local available_flavors local flavors_ref local flavor_lines local public_network_id @@ -142,10 +143,15 @@ function configure_tempest() { # If the ``DEFAULT_INSTANCE_TYPE`` not declared, use the new behavior # Tempest creates instane types for himself if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then - nova flavor-create m1.nano 42 64 0 1 + available_flavors=$(nova flavor-list) + if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then + nova flavor-create m1.nano 42 64 0 1 + fi flavor_ref=42 boto_instance_type=m1.nano - nova flavor-create m1.micro 84 128 0 1 + if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then + nova flavor-create m1.micro 84 128 0 1 + fi flavor_ref_alt=84 else # Check Nova for existing flavors and, if set, look for the From 001c7b6c11574f60aecd47a6fc3b8ca54a393105 Mon Sep 17 00:00:00 2001 From: Matt Odden Date: Thu, 21 Nov 2013 22:12:56 +0000 Subject: [PATCH 0265/4438] Remove powervm nova virt driver support The powervm virt driver was removed from nova in a recent change. This functionality is no longer needed in devstack. 
Change-Id: Iec620938a8cce63e0830fc7b9e9a679b361b4389 --- lib/nova_plugins/hypervisor-powervm | 76 ----------------------------- 1 file changed, 76 deletions(-) delete mode 100644 lib/nova_plugins/hypervisor-powervm diff --git a/lib/nova_plugins/hypervisor-powervm b/lib/nova_plugins/hypervisor-powervm deleted file mode 100644 index 561dd9f00b..0000000000 --- a/lib/nova_plugins/hypervisor-powervm +++ /dev/null @@ -1,76 +0,0 @@ -# lib/nova_plugins/hypervisor-powervm -# Configure the PowerVM hypervisor - -# Enable with: -# VIRT_DRIVER=powervm - -# Dependencies: -# ``functions`` file -# ``nova`` configuration - -# install_nova_hypervisor - install any external requirements -# configure_nova_hypervisor - make configuration changes, including those to other services -# start_nova_hypervisor - start any external services -# stop_nova_hypervisor - stop any external services -# cleanup_nova_hypervisor - remove transient data and cache - -# Save trace setting -MY_XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - - -# Entry Points -# ------------ - -# clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor() { - # This function intentionally left blank - : -} - -# configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor() { - POWERVM_MGR_TYPE=${POWERVM_MGR_TYPE:-"ivm"} - POWERVM_MGR_HOST=${POWERVM_MGR_HOST:-"powervm.host"} - POWERVM_MGR_USER=${POWERVM_MGR_USER:-"padmin"} - POWERVM_MGR_PASSWD=${POWERVM_MGR_PASSWD:-"password"} - POWERVM_IMG_REMOTE_PATH=${POWERVM_IMG_REMOTE_PATH:-"/tmp"} - POWERVM_IMG_LOCAL_PATH=${POWERVM_IMG_LOCAL_PATH:-"/tmp"} - iniset $NOVA_CONF DEFAULT compute_driver nova.virt.powervm.PowerVMDriver - iniset $NOVA_CONF DEFAULT powervm_mgr_type $POWERVM_MGR_TYPE - iniset $NOVA_CONF DEFAULT powervm_mgr $POWERVM_MGR_HOST - iniset $NOVA_CONF DEFAULT powervm_mgr_user $POWERVM_MGR_USER - iniset $NOVA_CONF DEFAULT powervm_mgr_passwd $POWERVM_MGR_PASSWD - iniset 
$NOVA_CONF DEFAULT powervm_img_remote_path $POWERVM_IMG_REMOTE_PATH - iniset $NOVA_CONF DEFAULT powervm_img_local_path $POWERVM_IMG_LOCAL_PATH -} - -# install_nova_hypervisor() - Install external components -function install_nova_hypervisor() { - # This function intentionally left blank - : -} - -# start_nova_hypervisor - Start any required external services -function start_nova_hypervisor() { - # This function intentionally left blank - : -} - -# stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor() { - # This function intentionally left blank - : -} - - -# Restore xtrace -$MY_XTRACE - -# Local variables: -# mode: shell-script -# End: From 6db28923263b1d99f03069ccac6126a13bac0b5e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 22 Nov 2013 12:16:02 -0500 Subject: [PATCH 0266/4438] Add hacking rules for shell scripts This is an attempt to collect the rules that we live by in devstack that are generally held. Writing these down help us figure out ways to put them into bash8 over time. These are a starting point for conversation. Change-Id: Id2b750665871ebbeddf4694ba080c75d2f6f443e --- HACKING.rst | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/HACKING.rst b/HACKING.rst index 3c08e679d9..103b579621 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -227,3 +227,51 @@ These scripts are executed serially by ``exercise.sh`` in testing situations. or graciously handle possible artifacts left over from previous runs if executed again. It is acceptable to require a reboot or even a re-install of DevStack to restore a clean test environment. + + +Bash Style Guidelines +~~~~~~~~~~~~~~~~~~~~~ +Devstack defines a bash set of best practices for maintaining large +collections of bash scripts. These should be considered as part of the +review process. + +We have a preliminary enforcing script for this called bash8 (only a +small number of these rules are enforced). 
+ +Whitespace Rules +---------------- + +- lines should not include trailing whitespace +- there should be no hard tabs in the file +- indents are 4 spaces, and all indentation should be some multiple of + them + +Control Structure Rules +----------------------- +- then should be on the same line as the if +- do should be on the same line as the for + +Example:: + + if [[ -r $TOP_DIR/local.conf ]]; then + LRC=$(get_meta_section_files $TOP_DIR/local.conf local) + for lfile in $LRC; do + if [[ "$lfile" == "localrc" ]]; then + if [[ -r $TOP_DIR/localrc ]]; then + warn $LINENO "localrc and local.conf:[[local]] both exist, using localrc" + else + echo "# Generated file, do not edit" >$TOP_DIR/.localrc.auto + get_meta_section $TOP_DIR/local.conf local $lfile >>$TOP_DIR/.localrc.auto + fi + fi + done + fi + +Variables and Functions +----------------------- +- functions should be used whenever possible for clarity +- functions should use ``local`` variables as much as possible to + ensure they are isolated from the rest of the environment +- local variables should be lower case, global variables should be + upper case +- function names should_have_underscores, NotCamelCase. From 3a82319ad7172de938cb1e7e01a270f41d09fe3d Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 24 Nov 2013 18:53:20 +0100 Subject: [PATCH 0267/4438] fixup_stuff prettytable min version The minimum prettytable version is changed from 0.6 to 0.7 in the global requirements. If the system has an older prettytable version the fixup_stuff does not takes effect in time, because at fixup time the system has the old version. Ensure the fixup installs the minimum required version in time. 
Change-Id: If1737dacb25db73b68e707953d05576ad8a97da7 --- tools/fixup_stuff.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index f9362307d8..5fb47dc29b 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -51,7 +51,7 @@ function get_package_path() { # Fix prettytable 0.7.2 permissions # Don't specify --upgrade so we use the existing package if present -pip_install prettytable +pip_install 'prettytable>0.7' PACKAGE_DIR=$(get_package_path prettytable) # Only fix version 0.7.2 dir=$(echo $PACKAGE_DIR/prettytable-0.7.2*) From 480309eea527d7721148f66f557772da0e9b5941 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sat, 23 Nov 2013 13:02:45 -0500 Subject: [PATCH 0268/4438] Allow overriding USE_GET_PIP via env vars devstack-gate wants to pre-cache and then use get-pip, but we can't throw the flag currently. Make the flag default settable via env vars. Change-Id: I661b52670b6ce494666cbdd611e4eee6b96c8321 Partial-Bug: #1254275 --- tools/install_pip.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 455323e6fa..6b9b25e3e9 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -26,6 +26,7 @@ FILES=$TOP_DIR/files # Handle arguments +USE_GET_PIP=${USE_GET_PIP:-0} INSTALL_PIP_VERSION=${INSTALL_PIP_VERSION:-"1.4.1"} while [[ -n "$1" ]]; do case $1 in @@ -63,7 +64,7 @@ function get_versions() { function install_get_pip() { if [[ ! 
-r $FILES/get-pip.py ]]; then (cd $FILES; \ - curl $PIP_GET_PIP_URL; \ + curl -O $PIP_GET_PIP_URL; \ ) fi sudo python $FILES/get-pip.py From 674ee84ec6c6cd2e802e132db64855d2f36c16e1 Mon Sep 17 00:00:00 2001 From: Robert Myers Date: Mon, 25 Nov 2013 13:15:35 -0600 Subject: [PATCH 0269/4438] Adding optional colorized log output for trove Change-Id: Ibdaed9c2d1527b4c38551efbc147597e2a668b1a --- lib/trove | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/lib/trove b/lib/trove index c40006bf5d..3f9b1be5f9 100644 --- a/lib/trove +++ b/lib/trove @@ -33,6 +33,17 @@ TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove} TROVE_BIN_DIR=/usr/local/bin +# setup_trove_logging() - Adds logging configuration to conf files +function setup_trove_logging() { + local CONF=$1 + iniset $CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $CONF DEFAULT use_syslog $SYSLOG + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + # Add color to logging output + setup_colorized_logging $CONF DEFAULT tenant user + fi +} + # create_trove_accounts() - Set up common required trove accounts # Tenant User Roles @@ -121,6 +132,9 @@ function configure_trove() { iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT control_exchange trove sed -i "s/localhost/$NETWORK_GATEWAY/g" $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample + setup_trove_logging $TROVE_CONF_DIR/trove.conf + setup_trove_logging $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample + # (Re)create trove taskmanager conf file if needed if is_service_enabled tr-tmgr; then iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT rabbit_password $RABBIT_PASSWORD @@ -130,6 +144,7 @@ function configure_trove() { iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_tenant_name trove iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS iniset 
$TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT + setup_trove_logging $TROVE_CONF_DIR/trove-taskmanager.conf fi # (Re)create trove conductor conf file if needed @@ -141,6 +156,7 @@ function configure_trove() { iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT control_exchange trove + setup_trove_logging $TROVE_CONF_DIR/trove-conductor.conf fi } From bd24a8d0f884d27f47834c917c047b54271c1179 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Fri, 20 Sep 2013 16:26:42 +1000 Subject: [PATCH 0270/4438] Allow deploying keystone with SSL certificates Allow providing certificates through environment variables to be used for keystone, and provide the basis for doing this for other services. It cannot be used in conjunction with tls-proxy as the service provides it's own encrypted endpoint. 
Impletmenting: blueprint devstack-https Change-Id: I8cf4c9c8c8a6911ae56ebcd14600a9d24cca99a0 --- lib/cinder | 2 ++ lib/glance | 2 ++ lib/heat | 1 + lib/ironic | 1 + lib/keystone | 19 +++++++++++++++- lib/nova | 2 ++ lib/swift | 2 ++ lib/tls | 50 +++++++++++++++++++++++++++++++++++++++++- lib/trove | 4 +++- openrc | 5 +++-- stack.sh | 26 ++++++++++++++++++++-- tools/create_userrc.sh | 5 ++++- 12 files changed, 111 insertions(+), 8 deletions(-) diff --git a/lib/cinder b/lib/cinder index 96d25058ce..9288685365 100644 --- a/lib/cinder +++ b/lib/cinder @@ -209,6 +209,7 @@ function configure_cinder() { inicomment $CINDER_API_PASTE_INI filter:authtoken auth_host inicomment $CINDER_API_PASTE_INI filter:authtoken auth_port inicomment $CINDER_API_PASTE_INI filter:authtoken auth_protocol + inicomment $CINDER_API_PASTE_INI filter:authtoken cafile inicomment $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name inicomment $CINDER_API_PASTE_INI filter:authtoken admin_user inicomment $CINDER_API_PASTE_INI filter:authtoken admin_password @@ -219,6 +220,7 @@ function configure_cinder() { iniset $CINDER_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST iniset $CINDER_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $CINDER_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $CINDER_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA iniset $CINDER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $CINDER_CONF keystone_authtoken admin_user cinder iniset $CINDER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD diff --git a/lib/glance b/lib/glance index eb727f1e2a..c88f2dc472 100644 --- a/lib/glance +++ b/lib/glance @@ -82,6 +82,7 @@ function configure_glance() { iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset 
$GLANCE_REGISTRY_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance @@ -99,6 +100,7 @@ function configure_glance() { iniset $GLANCE_API_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST iniset $GLANCE_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $GLANCE_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $GLANCE_API_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA iniset $GLANCE_API_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $GLANCE_API_CONF keystone_authtoken admin_user glance diff --git a/lib/heat b/lib/heat index 7a9ef0da26..e44a618162 100644 --- a/lib/heat +++ b/lib/heat @@ -96,6 +96,7 @@ function configure_heat() { iniset $HEAT_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $HEAT_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL iniset $HEAT_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $HEAT_CONF keystone_authtoken admin_user heat iniset $HEAT_CONF keystone_authtoken admin_password $SERVICE_PASSWORD diff --git a/lib/ironic b/lib/ironic index 9f86e841d8..099746ae22 100644 --- a/lib/ironic +++ b/lib/ironic @@ -98,6 +98,7 @@ function configure_ironic_api() { iniset $IRONIC_CONF_FILE keystone_authtoken auth_host $KEYSTONE_AUTH_HOST iniset $IRONIC_CONF_FILE keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $IRONIC_CONF_FILE keystone_authtoken 
auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $IRONIC_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA iniset $IRONIC_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ iniset $IRONIC_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $IRONIC_CONF_FILE keystone_authtoken admin_user ironic diff --git a/lib/keystone b/lib/keystone index 978577f55e..4a7d7bb717 100644 --- a/lib/keystone +++ b/lib/keystone @@ -4,6 +4,7 @@ # Dependencies: # # - ``functions`` file +# - ``tls`` file # - ``DEST``, ``STACK_USER`` # - ``IDENTITY_API_VERSION`` # - ``BASE_SQL_CONN`` @@ -79,6 +80,13 @@ KEYSTONE_VALID_IDENTITY_BACKENDS=kvs,ldap,pam,sql # valid assignment backends as per dir keystone/identity/backends KEYSTONE_VALID_ASSIGNMENT_BACKENDS=kvs,ldap,sql +# if we are running with SSL use https protocols +if is_ssl_enabled_service "key"; then + KEYSTONE_AUTH_PROTOCOL="https" + KEYSTONE_SERVICE_PROTOCOL="https" +fi + + # Functions # --------- # cleanup_keystone() - Remove residual data files, anything left over from previous @@ -172,6 +180,15 @@ function configure_keystone() { iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(public_port)s/" iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(admin_port)s/" + # Register SSL certificates if provided + if is_ssl_enabled_service key; then + ensure_certificates KEYSTONE + + iniset $KEYSTONE_CONF ssl enable True + iniset $KEYSTONE_CONF ssl certfile $KEYSTONE_SSL_CERT + iniset $KEYSTONE_CONF ssl keyfile $KEYSTONE_SSL_KEY + fi + if is_service_enabled tls-proxy; then # Set the service ports for a proxy to take the originals iniset $KEYSTONE_CONF DEFAULT public_port $KEYSTONE_SERVICE_PORT_INT @@ -373,7 +390,7 @@ function start_keystone() { fi echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
curl --noproxy '*' -s http://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then die $LINENO "keystone did not start" fi diff --git a/lib/nova b/lib/nova index 6ab2000111..5fd0bebf65 100644 --- a/lib/nova +++ b/lib/nova @@ -225,6 +225,7 @@ function configure_nova() { inicomment $NOVA_API_PASTE_INI filter:authtoken auth_host inicomment $NOVA_API_PASTE_INI filter:authtoken auth_protocol inicomment $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name + inicomment $NOVA_API_PASTE_INI filter:authtoken cafile inicomment $NOVA_API_PASTE_INI filter:authtoken admin_user inicomment $NOVA_API_PASTE_INI filter:authtoken admin_password fi @@ -399,6 +400,7 @@ function create_nova_conf() { iniset $NOVA_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST iniset $NOVA_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL iniset $NOVA_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $NOVA_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA iniset $NOVA_CONF keystone_authtoken admin_user nova iniset $NOVA_CONF keystone_authtoken admin_password $SERVICE_PASSWORD fi diff --git a/lib/swift b/lib/swift index c103b5ba5f..c0493110b9 100644 --- a/lib/swift +++ b/lib/swift @@ -306,6 +306,7 @@ function configure_swift() { iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken cafile $KEYSTONE_SSL_CA iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken 
admin_tenant_name $SERVICE_TENANT_NAME iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift @@ -325,6 +326,7 @@ paste.filter_factory = keystone.middleware.s3_token:filter_factory auth_port = ${KEYSTONE_AUTH_PORT} auth_host = ${KEYSTONE_AUTH_HOST} auth_protocol = ${KEYSTONE_AUTH_PROTOCOL} +cafile = ${KEYSTONE_SSL_CA} auth_token = ${SERVICE_TOKEN} admin_token = ${SERVICE_TOKEN} diff --git a/lib/tls b/lib/tls index a1a7fddc18..6134fa1bad 100644 --- a/lib/tls +++ b/lib/tls @@ -22,7 +22,8 @@ # - make_int_ca # - new_cert $INT_CA_DIR int-server "abc" # - start_tls_proxy HOST_IP 5000 localhost 5000 - +# - ensure_certificates +# - is_ssl_enabled_service # Defaults # -------- @@ -309,6 +310,53 @@ function make_root_CA() { } +# Certificate Input Configuration +# =============================== + +# check to see if the service(s) specified are to be SSL enabled. +# +# Multiple services specified as arguments are ``OR``'ed together; the test +# is a short-circuit boolean, i.e it returns on the first match. +# +# Uses global ``SSL_ENABLED_SERVICES`` +function is_ssl_enabled_service() { + services=$@ + for service in ${services}; do + [[ ,${SSL_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 + done + return 1 +} + + +# Ensure that the certificates for a service are in place. This function does +# not check that a service is SSL enabled, this should already have been +# completed. +# +# The function expects to find a certificate, key and CA certificate in the +# variables {service}_SSL_CERT, {service}_SSL_KEY and {service}_SSL_CA. For +# example for keystone this would be KEYSTONE_SSL_CERT, KEYSTONE_SSL_KEY and +# KEYSTONE_SSL_CA. If it does not find these certificates the program will +# quit. 
+function ensure_certificates() { + local service=$1 + + local cert_var="${service}_SSL_CERT" + local key_var="${service}_SSL_KEY" + local ca_var="${service}_SSL_CA" + + local cert=${!cert_var} + local key=${!key_var} + local ca=${!ca_var} + + if [[ !($cert && $key && $ca) ]]; then + die $LINENO "Missing either the ${cert_var} ${key_var} or ${ca_var}" \ + "variable to enable SSL for ${service}" + fi + + cat $ca >> $SSL_BUNDLE_FILE +} + + # Proxy Functions # =============== diff --git a/lib/trove b/lib/trove index c40006bf5d..5ba4de5a4f 100644 --- a/lib/trove +++ b/lib/trove @@ -29,7 +29,6 @@ TROVE_DIR=$DEST/trove TROVECLIENT_DIR=$DEST/python-troveclient TROVE_CONF_DIR=/etc/trove TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove -TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT//v$IDENTITY_API_VERSION TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove} TROVE_BIN_DIR=/usr/local/bin @@ -102,6 +101,7 @@ function configure_trove() { iniset $TROVE_API_PASTE_INI filter:tokenauth auth_host $KEYSTONE_AUTH_HOST iniset $TROVE_API_PASTE_INI filter:tokenauth auth_port $KEYSTONE_AUTH_PORT iniset $TROVE_API_PASTE_INI filter:tokenauth auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $TROVE_API_PASTE_INI filter:tokenauth cafile $KEYSTONE_SSL_CA iniset $TROVE_API_PASTE_INI filter:tokenauth admin_tenant_name $SERVICE_TENANT_NAME iniset $TROVE_API_PASTE_INI filter:tokenauth admin_user trove iniset $TROVE_API_PASTE_INI filter:tokenauth admin_password $SERVICE_PASSWORD @@ -123,6 +123,8 @@ function configure_trove() { # (Re)create trove taskmanager conf file if needed if is_service_enabled tr-tmgr; then + TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT//v$IDENTITY_API_VERSION + iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT rabbit_password $RABBIT_PASSWORD iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT sql_connection `database_connection_url trove` iniset $TROVE_CONF_DIR/trove-taskmanager.conf 
DEFAULT taskmanager_manager trove.taskmanager.manager.Manager diff --git a/openrc b/openrc index 804bb3f3d7..784b00e51b 100644 --- a/openrc +++ b/openrc @@ -58,6 +58,7 @@ export OS_NO_CACHE=${OS_NO_CACHE:-1} HOST_IP=${HOST_IP:-127.0.0.1} SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} +KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL} # Some exercises call glance directly. On a single-node installation, Glance # should be listening on HOST_IP. If its running elsewhere, it can be set here @@ -71,10 +72,10 @@ export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} # the user/tenant has access to - including nova, glance, keystone, swift, ... # We currently recommend using the 2.0 *identity api*. # -export OS_AUTH_URL=$SERVICE_PROTOCOL://$SERVICE_HOST:5000/v${OS_IDENTITY_API_VERSION} +export OS_AUTH_URL=$KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:5000/v${OS_IDENTITY_API_VERSION} # Set the pointer to our CA certificate chain. Harmless if TLS is not used. -export OS_CACERT=$INT_CA_DIR/ca-chain.pem +export OS_CACERT=${OS_CACERT:-$INT_CA_DIR/ca-chain.pem} # Currently novaclient needs you to specify the *compute api* version. This # needs to match the config of your catalog returned by Keystone. diff --git a/stack.sh b/stack.sh index 47d93bd642..28032def37 100755 --- a/stack.sh +++ b/stack.sh @@ -290,6 +290,10 @@ LOG_COLOR=`trueorfalse True $LOG_COLOR` # Service startup timeout SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} +# Reset the bundle of CA certificates +SSL_BUNDLE_FILE="$DATA_DIR/ca-bundle.pem" +rm -f $SSL_BUNDLE_FILE + # Configure Projects # ================== @@ -798,6 +802,17 @@ fi restart_rpc_backend +# Export Certicate Authority Bundle +# --------------------------------- + +# If certificates were used and written to the SSL bundle file then these +# should be exported so clients can validate their connections. 
+ +if [ -f $SSL_BUNDLE_FILE ]; then + export OS_CACERT=$SSL_BUNDLE_FILE +fi + + # Configure database # ------------------ @@ -1145,6 +1160,7 @@ if is_service_enabled trove; then start_trove fi + # Create account rc files # ======================= @@ -1153,7 +1169,13 @@ fi # which is helpful in image bundle steps. if is_service_enabled nova && is_service_enabled key; then - $TOP_DIR/tools/create_userrc.sh -PA --target-dir $TOP_DIR/accrc + USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc" + + if [ -f $SSL_BUNDLE_FILE ]; then + USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE" + fi + + $TOP_DIR/tools/create_userrc.sh $USERRC_PARAMS fi @@ -1229,7 +1251,7 @@ fi CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT") echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \ - SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP; do + SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP KEYSTONE_AUTH_PROTOCOL OS_CACERT; do echo $i=${!i} >>$TOP_DIR/.stackenv done diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index 8383fe7d77..5f4c48660b 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -43,6 +43,7 @@ Optional Arguments --os-tenant-name --os-tenant-id --os-auth-url +--os-cacert --target-dir --skip-tenant --debug @@ -53,7 +54,7 @@ $0 -P -C mytenant -u myuser -p mypass EOF } -if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,help,debug -- "$@") +if ! 
options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,os-cacert:,help,debug -- "$@") then #parse error display_help @@ -80,6 +81,7 @@ do --os-tenant-id) export OS_TENANT_ID=$2; shift ;; --skip-tenant) SKIP_TENANT="$SKIP_TENANT$2,"; shift ;; --os-auth-url) export OS_AUTH_URL=$2; shift ;; + --os-cacert) export OS_CACERT=$2; shift ;; --target-dir) ACCOUNT_DIR=$2; shift ;; --debug) set -o xtrace ;; -u) MODE=${MODE:-one}; USER_NAME=$2; shift ;; @@ -201,6 +203,7 @@ export OS_USERNAME="$user_name" # Openstack Tenant ID = $tenant_id export OS_TENANT_NAME="$tenant_name" export OS_AUTH_URL="$OS_AUTH_URL" +export OS_CACERT="$OS_CACERT" export EC2_CERT="$ec2_cert" export EC2_PRIVATE_KEY="$ec2_private_key" export EC2_USER_ID=42 #not checked by nova (can be a 12-digit id) From a677b7fe828445968cdc714a630c74d35321c8fb Mon Sep 17 00:00:00 2001 From: Emilien Macchi Date: Mon, 25 Nov 2013 23:40:20 +0100 Subject: [PATCH 0271/4438] Move neutron cache dir into a function Taking the model of Nova, this patch aims to move the cache directory management into a function with the goal to reuse it somewhere else like Grenade. 
Change-Id: I93df52f69ef339e6528b88d88d4ea70e0b725893 Signed-off-by: Emilien Macchi --- lib/neutron | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/lib/neutron b/lib/neutron index 70417be5d3..8b0656bc26 100644 --- a/lib/neutron +++ b/lib/neutron @@ -17,6 +17,7 @@ # - configure_neutron_third_party # - init_neutron_third_party # - start_neutron_third_party +# - create_neutron_cache_dir # - create_nova_conf_neutron # - start_neutron_service_and_check # - create_neutron_initial_network @@ -296,6 +297,14 @@ function create_nova_conf_neutron() { fi } +# create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process +function create_neutron_cache_dir() { + # Create cache dir + sudo mkdir -p $NEUTRON_AUTH_CACHE_DIR + sudo chown $STACK_USER $NEUTRON_AUTH_CACHE_DIR + rm -f $NEUTRON_AUTH_CACHE_DIR/* +} + # create_neutron_accounts() - Set up common required neutron accounts # Tenant User Roles @@ -782,9 +791,7 @@ function _neutron_setup_keystone() { if [[ -z $skip_auth_cache ]]; then iniset $conf_file $section signing_dir $NEUTRON_AUTH_CACHE_DIR # Create cache dir - sudo mkdir -p $NEUTRON_AUTH_CACHE_DIR - sudo chown $STACK_USER $NEUTRON_AUTH_CACHE_DIR - rm -f $NEUTRON_AUTH_CACHE_DIR/* + create_neutron_cache_dir fi } From cee4b3bddff851d875562bf9ce27b2754b75b36a Mon Sep 17 00:00:00 2001 From: Peter Portante Date: Wed, 20 Nov 2013 14:33:16 -0500 Subject: [PATCH 0272/4438] Shorten PKI token logging Log only the first 12 characters of auth-token for the Swift API, since PKI based auth-tokens from keystone can huge (>> 2K). Also tidy up a comment. 
Change-Id: Ib784e8ecdcb7e371fe03458c7fd82b4460fa82b9 Signed-off-by: Peter Portante --- lib/swift | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/lib/swift b/lib/swift index c103b5ba5f..ce13868f4a 100644 --- a/lib/swift +++ b/lib/swift @@ -96,6 +96,13 @@ SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} SWIFT_REPLICAS=${SWIFT_REPLICAS:-1} SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS}) +# Set ``SWIFT_LOG_TOKEN_LENGTH`` to configure how many characters of an auth +# token should be placed in the logs. When keystone is used with PKI tokens, +# the token values can be huge, seemingly larger the 2K, at the least. We +# restrict it here to a default of 12 characters, which should be enough to +# trace through the logs when looking for its use. +SWIFT_LOG_TOKEN_LENGTH=${SWIFT_LOG_TOKEN_LENGTH:-12} + # Set ``OBJECT_PORT_BASE``, ``CONTAINER_PORT_BASE``, ``ACCOUNT_PORT_BASE`` # Port bases used in port number calclution for the service "nodes" # The specified port number will be used, the additinal ports calculated by @@ -281,6 +288,9 @@ function configure_swift() { SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer" fi + # Restrict the length of auth tokens in the swift proxy-server logs. + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:proxy-logging reveal_sensitive_prefix ${SWIFT_LOG_TOKEN_LENGTH} + # By default Swift will be installed with keystone and tempauth middleware # and add the swift3 middleware if its configured for it. 
The token for # tempauth would be prefixed with the reseller_prefix setting `TEMPAUTH_` the @@ -336,7 +346,7 @@ EOF cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONF_DIR}/swift.conf iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH} - # This function generates an object/account/proxy configuration + # This function generates an object/container/account configuration # emulating 4 nodes on different ports function generate_swift_config() { local swift_node_config=$1 From 8afc8935362388c54101e4d34b3310aa2e57c412 Mon Sep 17 00:00:00 2001 From: Peter Portante Date: Wed, 20 Nov 2013 17:34:39 -0500 Subject: [PATCH 0273/4438] Use the swift logging adapter for txn IDs Change-Id: I2b2308eb9606279cffc1965fc3b86e9597d63e87 Signed-off-by: Peter Portante --- lib/swift | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/swift b/lib/swift index ce13868f4a..40722ab030 100644 --- a/lib/swift +++ b/lib/swift @@ -321,6 +321,10 @@ function configure_swift() { iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken signing_dir $SWIFT_AUTH_CACHE_DIR + # This causes the authtoken middleware to use the same python logging + # adapter provided by the swift proxy-server, so that request transaction + # IDs will included in all of its log messages. + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken log_name swift iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles From b9e25135c51ee29edbdf48d41e1cb637188cc358 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 1 Oct 2013 14:45:04 -0500 Subject: [PATCH 0274/4438] freshen the LDAP support * Build the base DN from a given domain name * Remove all hard-coded names to allow configuration of base DN * Fix manager DN (cn=Manager,dc=...) 
* Add ldap init_ldap() * Add support for clean.sh Change-Id: Ieb69be9740653645b8e000574ad3fe59a0f97540 --- clean.sh | 6 + files/apts/ldap | 2 +- .../ldap/{openstack.ldif => keystone.ldif.in} | 16 +- files/ldap/manager.ldif.in | 9 +- ...e-config.ldif => suse-base-config.ldif.in} | 4 +- lib/keystone | 14 +- lib/ldap | 146 +++++++++++++----- 7 files changed, 141 insertions(+), 56 deletions(-) rename files/ldap/{openstack.ldif => keystone.ldif.in} (54%) rename files/ldap/{base-config.ldif => suse-base-config.ldif.in} (77%) diff --git a/clean.sh b/clean.sh index 395941ae21..480a81214f 100755 --- a/clean.sh +++ b/clean.sh @@ -15,6 +15,8 @@ TOP_DIR=$(cd $(dirname "$0") && pwd) # Import common functions source $TOP_DIR/functions +FILES=$TOP_DIR/files + # Load local configuration source $TOP_DIR/stackrc @@ -84,6 +86,10 @@ cleanup_nova cleanup_neutron cleanup_swift +if is_service_enabled ldap; then + cleanup_ldap +fi + # Do the hypervisor cleanup until this can be moved back into lib/nova if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then cleanup_nova_hypervisor diff --git a/files/apts/ldap b/files/apts/ldap index 81a00f27bf..26f7aeffe3 100644 --- a/files/apts/ldap +++ b/files/apts/ldap @@ -1,3 +1,3 @@ ldap-utils -slapd # NOPRIME +slapd python-ldap diff --git a/files/ldap/openstack.ldif b/files/ldap/keystone.ldif.in similarity index 54% rename from files/ldap/openstack.ldif rename to files/ldap/keystone.ldif.in index 02caf3f368..cf51907cf6 100644 --- a/files/ldap/openstack.ldif +++ b/files/ldap/keystone.ldif.in @@ -1,26 +1,26 @@ -dn: dc=openstack,dc=org -dc: openstack +dn: ${BASE_DN} objectClass: dcObject objectClass: organizationalUnit -ou: openstack +dc: ${BASE_DC} +ou: ${BASE_DC} -dn: ou=UserGroups,dc=openstack,dc=org +dn: ou=UserGroups,${BASE_DN} objectClass: organizationalUnit ou: UserGroups -dn: ou=Users,dc=openstack,dc=org +dn: ou=Users,${BASE_DN} objectClass: organizationalUnit ou: Users -dn: ou=Roles,dc=openstack,dc=org +dn: ou=Roles,${BASE_DN} objectClass: 
organizationalUnit ou: Roles -dn: ou=Projects,dc=openstack,dc=org +dn: ou=Projects,${BASE_DN} objectClass: organizationalUnit ou: Projects -dn: cn=9fe2ff9ee4384b1894a90878d3e92bab,ou=Roles,dc=openstack,dc=org +dn: cn=9fe2ff9ee4384b1894a90878d3e92bab,ou=Roles,${BASE_DN} objectClass: organizationalRole ou: _member_ cn: 9fe2ff9ee4384b1894a90878d3e92bab diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in index e522150f2e..de3b69de7c 100644 --- a/files/ldap/manager.ldif.in +++ b/files/ldap/manager.ldif.in @@ -1,10 +1,15 @@ dn: olcDatabase={${LDAP_OLCDB_NUMBER}}hdb,cn=config changetype: modify replace: olcSuffix -olcSuffix: dc=openstack,dc=org +olcSuffix: ${BASE_DN} - replace: olcRootDN -olcRootDN: dc=Manager,dc=openstack,dc=org +olcRootDN: ${MANAGER_DN} - ${LDAP_ROOTPW_COMMAND}: olcRootPW olcRootPW: ${SLAPPASS} +- +replace: olcDbIndex +olcDbIndex: objectClass eq +olcDbIndex: default pres,eq +olcDbIndex: cn,sn,givenName,co diff --git a/files/ldap/base-config.ldif b/files/ldap/suse-base-config.ldif.in similarity index 77% rename from files/ldap/base-config.ldif rename to files/ldap/suse-base-config.ldif.in index 026d8bc0fc..00256ee9d8 100644 --- a/files/ldap/base-config.ldif +++ b/files/ldap/suse-base-config.ldif.in @@ -12,8 +12,10 @@ objectClass: olcSchemaConfig cn: schema include: file:///etc/openldap/schema/core.ldif +include: file:///etc/openldap/schema/cosine.ldif +include: file:///etc/openldap/schema/inetorgperson.ldif dn: olcDatabase={1}hdb,cn=config objectClass: olcHdbConfig olcDbDirectory: /var/lib/ldap -olcSuffix: dc=openstack,dc=org +olcSuffix: ${BASE_DN} diff --git a/lib/keystone b/lib/keystone index c1fa0af8af..76eff54e35 100644 --- a/lib/keystone +++ b/lib/keystone @@ -143,17 +143,17 @@ function configure_keystone() { if is_service_enabled ldap; then #Set all needed ldap values - iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD - iniset $KEYSTONE_CONF ldap user "dc=Manager,dc=openstack,dc=org" - iniset $KEYSTONE_CONF ldap suffix 
"dc=openstack,dc=org" + iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD + iniset $KEYSTONE_CONF ldap user $LDAP_MANAGER_DN + iniset $KEYSTONE_CONF ldap suffix $LDAP_BASE_DN iniset $KEYSTONE_CONF ldap use_dumb_member "True" iniset $KEYSTONE_CONF ldap user_attribute_ignore "enabled,email,tenants,default_project_id" iniset $KEYSTONE_CONF ldap tenant_attribute_ignore "enabled" iniset $KEYSTONE_CONF ldap tenant_domain_id_attribute "businessCategory" iniset $KEYSTONE_CONF ldap tenant_desc_attribute "description" - iniset $KEYSTONE_CONF ldap tenant_tree_dn "ou=Projects,dc=openstack,dc=org" + iniset $KEYSTONE_CONF ldap tenant_tree_dn "ou=Projects,$LDAP_BASE_DN" iniset $KEYSTONE_CONF ldap user_domain_id_attribute "businessCategory" - iniset $KEYSTONE_CONF ldap user_tree_dn "ou=Users,dc=openstack,dc=org" + iniset $KEYSTONE_CONF ldap user_tree_dn "ou=Users,$LDAP_BASE_DN" iniset $KEYSTONE_CONF DEFAULT member_role_id "9fe2ff9ee4384b1894a90878d3e92bab" iniset $KEYSTONE_CONF DEFAULT member_role_name "_member_" fi @@ -320,6 +320,10 @@ create_keystone_accounts() { # init_keystone() - Initialize databases, etc. 
function init_keystone() { + if is_service_enabled ldap; then + init_ldap + fi + # (Re)create keystone database recreate_database keystone utf8 diff --git a/lib/ldap b/lib/ldap index 80992a7a09..e4bd41624d 100644 --- a/lib/ldap +++ b/lib/ldap @@ -9,68 +9,137 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace + +LDAP_DOMAIN=${LDAP_DOMAIN:-openstack.org} +# Make an array of domain components +DC=(${LDAP_DOMAIN/./ }) + +# Leftmost domain component used in top-level entry +LDAP_BASE_DC=${DC[0]} + +# Build the base DN +dn="" +for dc in ${DC[*]}; do + dn="$dn,dc=$dc" +done +LDAP_BASE_DN=${dn#,} + +LDAP_MANAGER_DN="${LDAP_MANAGER_DN:-cn=Manager,${LDAP_BASE_DN}}" +LDAP_URL=${LDAP_URL:-ldap://localhost} + LDAP_SERVICE_NAME=slapd +if is_ubuntu; then + LDAP_OLCDB_NUMBER=1 + LDAP_ROOTPW_COMMAND=replace +elif is_fedora; then + LDAP_OLCDB_NUMBER=2 + LDAP_ROOTPW_COMMAND=add +elif is_suse; then + # SUSE has slappasswd in /usr/sbin/ + PATH=$PATH:/usr/sbin/ + LDAP_OLCDB_NUMBER=1 + LDAP_ROOTPW_COMMAND=add + LDAP_SERVICE_NAME=ldap +fi + + # Functions # --------- +# Perform common variable substitutions on the data files +# _ldap_varsubst file +function _ldap_varsubst() { + local infile=$1 + sed -e " + s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER| + s|\${SLAPPASS}|$SLAPPASS| + s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND| + s|\${BASE_DC}|$LDAP_BASE_DC| + s|\${BASE_DN}|$LDAP_BASE_DN| + s|\${MANAGER_DN}|$LDAP_MANAGER_DN| + " $infile +} + +# clean_ldap() - Remove ldap server +function cleanup_ldap() { + uninstall_package $(get_packages ldap) + if is_ubuntu; then + uninstall_package slapd ldap-utils libslp1 + sudo rm -rf /etc/ldap/ldap.conf /var/lib/ldap + elif is_fedora; then + sudo rm -rf /etc/openldap /var/lib/ldap + elif is_suse; then + sudo rm -rf /var/lib/ldap + fi +} + +# init_ldap +# init_ldap() - Initialize databases, etc. 
+function init_ldap() { + local keystone_ldif + + TMP_LDAP_DIR=$(mktemp -d -t ldap.$$.XXXXXXXXXX) + + # Remove data but not schemas + clear_ldap_state + + # Add our top level ldap nodes + if ldapsearch -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -b "$LDAP_BASE_DN" | grep -q "Success"; then + printf "LDAP already configured for $LDAP_BASE_DC\n" + else + printf "Configuring LDAP for $LDAP_BASE_DC\n" + # If BASE_DN is changed, the user may override the default file + if [[ -r $FILES/ldap/${LDAP_BASE_DC}.ldif.in ]]; then + keystone_ldif=${LDAP_BASE_DC}.ldif + else + keystone_ldif=keystone.ldif + fi + _ldap_varsubst $FILES/ldap/${keystone_ldif}.in >$TMP_LDAP_DIR/${keystone_ldif} + if [[ -r $TMP_LDAP_DIR/${keystone_ldif} ]]; then + ldapadd -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -c -f $TMP_LDAP_DIR/${keystone_ldif} + fi + fi + + rm -rf TMP_LDAP_DIR +} + # install_ldap # install_ldap() - Collect source and prepare function install_ldap() { echo "Installing LDAP inside function" - echo "LDAP_PASSWORD is $LDAP_PASSWORD" echo "os_VENDOR is $os_VENDOR" - printf "installing" + + TMP_LDAP_DIR=$(mktemp -d -t ldap.$$.XXXXXXXXXX) + + printf "installing OpenLDAP" if is_ubuntu; then - LDAP_OLCDB_NUMBER=1 - LDAP_ROOTPW_COMMAND=replace - sudo DEBIAN_FRONTEND=noninteractive apt-get install slapd ldap-utils - #automatically starts LDAP on ubuntu so no need to call start_ldap + # Ubuntu automatically starts LDAP so no need to call start_ldap() + : elif is_fedora; then - LDAP_OLCDB_NUMBER=2 - LDAP_ROOTPW_COMMAND=add start_ldap elif is_suse; then - LDAP_OLCDB_NUMBER=1 - LDAP_ROOTPW_COMMAND=add - LDAP_SERVICE_NAME=ldap - # SUSE has slappasswd in /usr/sbin/ - PATH=$PATH:/usr/sbin/ - sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $FILES/ldap/base-config.ldif + _ldap_varsubst $FILES/ldap/suse-base-config.ldif.in >$TMP_LDAP_DIR/suse-base-config.ldif + sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $TMP_LDAP_DIR/suse-base-config.ldif sudo sed -i 
'/^OPENLDAP_START_LDAPI=/s/"no"/"yes"/g' /etc/sysconfig/openldap start_ldap fi - printf "generate password file" - SLAPPASS=`slappasswd -s $LDAP_PASSWORD` - - printf "secret is $SLAPPASS\n" - #create manager.ldif - TMP_MGR_DIFF_FILE=`mktemp -t manager_ldiff.$$.XXXXXXXXXX.ldif` - sed -e "s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER|" -e "s|\${SLAPPASS}|$SLAPPASS|" -e "s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND|" $FILES/ldap/manager.ldif.in >> $TMP_MGR_DIFF_FILE + echo "LDAP_PASSWORD is $LDAP_PASSWORD" + SLAPPASS=$(slappasswd -s $LDAP_PASSWORD) + printf "LDAP secret is $SLAPPASS\n" - #update ldap olcdb - sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_MGR_DIFF_FILE + # Create manager.ldif and add to olcdb + _ldap_varsubst $FILES/ldap/manager.ldif.in >$TMP_LDAP_DIR/manager.ldif + sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_LDAP_DIR/manager.ldif # On fedora we need to manually add cosine and inetorgperson schemas - if is_fedora || is_suse; then + if is_fedora; then sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/cosine.ldif sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif fi - # add our top level ldap nodes - if ldapsearch -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -b dc=openstack,dc=org | grep -q "Success"; then - printf "LDAP already configured for OpenStack\n" - if [[ "$KEYSTONE_CLEAR_LDAP" == "yes" ]]; then - # clear LDAP state - clear_ldap_state - # reconfigure LDAP for OpenStack - ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w $LDAP_PASSWORD -f $FILES/ldap/openstack.ldif - fi - else - printf "Configuring LDAP for OpenStack\n" - ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w $LDAP_PASSWORD -f $FILES/ldap/openstack.ldif - fi + rm -rf TMP_LDAP_DIR } # start_ldap() - Start LDAP @@ -78,7 +147,6 @@ function start_ldap() { sudo service $LDAP_SERVICE_NAME restart } - # stop_ldap() - Stop LDAP function stop_ldap() { sudo service 
$LDAP_SERVICE_NAME stop @@ -86,7 +154,7 @@ function stop_ldap() { # clear_ldap_state() - Clear LDAP State function clear_ldap_state() { - ldapdelete -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -r "dc=openstack,dc=org" + ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN" } # Restore xtrace From 1c402286cff1dfda5182020e4956f73e7d063d71 Mon Sep 17 00:00:00 2001 From: Gordon Chung Date: Tue, 26 Nov 2013 13:30:11 -0500 Subject: [PATCH 0275/4438] split collector service the ceilometer collector service has been split into two: ceilometer-collector and ceilometer-agent-notification Change-Id: I6114fd7f3e063abfa74d48d402dc863bccd249b6 Blueprint: split-collector --- lib/ceilometer | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 8e2970c652..fac3be14a9 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -3,7 +3,7 @@ # To enable a minimal set of Ceilometer services, add the following to localrc: # -# enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api +# enable_service ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api # # To ensure Ceilometer alarming services are enabled also, further add to the localrc: # @@ -145,6 +145,7 @@ function start_ceilometer() { screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" fi screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF" + screen_it ceilometer-anotification "cd ; ceilometer-agent-notification --config-file $CEILOMETER_CONF" screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF" screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" @@ -160,7 +161,7 @@ function start_ceilometer() { # stop_ceilometer() - Stop running 
processes function stop_ceilometer() { # Kill the ceilometer screen windows - for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do + for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do screen -S $SCREEN_NAME -p $serv -X kill done } From afbc631cb8c89316bbecbf0f2c601103304e1994 Mon Sep 17 00:00:00 2001 From: Tomoe Sugihara Date: Thu, 14 Nov 2013 20:02:47 +0000 Subject: [PATCH 0276/4438] Make tempest L3 capable plugin aware. With this patch, the public network config in tempest.conf will be done for the plugins that support L3. Change-Id: I820fe300fac45ff92d1281ff0c43ebc137783210 --- lib/neutron | 7 +++++++ lib/tempest | 3 ++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 8b0656bc26..6eabef5b3e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -115,6 +115,13 @@ Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} # nova vif driver that all plugins should use NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} +# The next two variables are configured by plugin +# e.g. _configure_neutron_l3_agent or lib/neutron_plugins/* +# +# The plugin supports L3. 
+Q_L3_ENABLED=${Q_L3_ENABLED:-False} +# L3 routers exist per tenant +Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-False} # List of config file names in addition to the main plugin config file # See _configure_neutron_common() for details about setting it up diff --git a/lib/tempest b/lib/tempest index 803b740221..7932fe69a3 100644 --- a/lib/tempest +++ b/lib/tempest @@ -15,6 +15,7 @@ # - ``PUBLIC_NETWORK_NAME`` # - ``Q_USE_NAMESPACE`` # - ``Q_ROUTER_NAME`` +# - ``Q_L3_ENABLED`` # - ``VIRT_DRIVER`` # - ``LIBVIRT_TYPE`` # - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone @@ -200,7 +201,7 @@ function configure_tempest() { ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method} - if is_service_enabled q-l3; then + if [ "$Q_L3_ENABLED" = "True" ]; then public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \ awk '{print $2}') if [ "$Q_USE_NAMESPACE" == "False" ]; then From 3d94736b60d9f3c2f159e81eab5841dba255515a Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Wed, 27 Nov 2013 10:06:58 +0100 Subject: [PATCH 0277/4438] Assign unique name to each fake nova-compute Without a unique name, the scheduler (and anything else, really) will consider each of these nova-compute processes as being one and the same, so only one entry in the services table, only one hypervisor seen by the scheduler, etc. Assigning unique names lets us simulate an arbitrary amount of nova-computes which is very handy for benchmarking the scheduler. 
Change-Id: Ie03aad81bd2a8e73b876a9eae934bc00bf2f71e9 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 6ab2000111..43a33127a9 100644 --- a/lib/nova +++ b/lib/nova @@ -650,7 +650,7 @@ function start_nova_compute() { screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'" elif [[ "$VIRT_DRIVER" = 'fake' ]]; then for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do - screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" + screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf --config-file <(echo -e '[DEFAULT]\nhost=${HOSTNAME}${i}')" done else if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then From 06b345e509f7fa213be17715f9ac581a51f2fd56 Mon Sep 17 00:00:00 2001 From: Flaper Fesp Date: Wed, 4 Sep 2013 15:35:47 +0200 Subject: [PATCH 0278/4438] Add marconi support to devstack Marconi has an optional dependency on keystone for authentication. 
This code was tested with everything enabled and also with the following localrc: STACK_USER=fedora SERVICE_TOKEN=secrete ADMIN_PASSWORD=secrete MYSQL_PASSWORD=secrete RABBIT_PASSWORD=secrete SERVICE_PASSWORD=secrete disable_all_services enable_service qpid enable_service key enable_service mysql enable_service marconi-server Implements blueprint marconi-devstack-integration Implements blueprint devstack-support Change-Id: I13495bcc5c5eb66cee641894e9f84a0089460c8b --- exercises/marconi.sh | 43 ++++++++++ extras.d/70-marconi.sh | 29 +++++++ files/apts/marconi-server | 3 + files/rpms/marconi-server | 3 + lib/marconi | 171 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 249 insertions(+) create mode 100755 exercises/marconi.sh create mode 100644 extras.d/70-marconi.sh create mode 100644 files/apts/marconi-server create mode 100644 files/rpms/marconi-server create mode 100644 lib/marconi diff --git a/exercises/marconi.sh b/exercises/marconi.sh new file mode 100755 index 0000000000..1b9788dce6 --- /dev/null +++ b/exercises/marconi.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +# **marconi.sh** + +# Sanity check that Marconi started if enabled + +echo "*********************************************************************" +echo "Begin DevStack Exercise: $0" +echo "*********************************************************************" + +# This script exits on an error so that errors don't compound and you see +# only the first error that occurred. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. 
+set -o xtrace + + +# Settings +# ======== + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import exercise configuration +source $TOP_DIR/exerciserc + +is_service_enabled marconi-server || exit 55 + +curl http://$SERVICE_HOST:8888/v1/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Marconi API not functioning!" + +set +o xtrace +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" diff --git a/extras.d/70-marconi.sh b/extras.d/70-marconi.sh new file mode 100644 index 0000000000..a96a4c546c --- /dev/null +++ b/extras.d/70-marconi.sh @@ -0,0 +1,29 @@ +# marconi.sh - Devstack extras script to install Marconi + +if is_service_enabled marconi-server; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/marconi + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing Marconi" + install_marconiclient + install_marconi + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + echo_summary "Configuring Marconi" + configure_marconi + configure_marconiclient + + if is_service_enabled key; then + create_marconi_accounts + fi + + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + echo_summary "Initializing Marconi" + init_marconi + start_marconi + fi + + if [[ "$1" == "unstack" ]]; then + stop_marconi + fi +fi diff --git a/files/apts/marconi-server b/files/apts/marconi-server new file mode 100644 index 0000000000..bc7ef22445 --- /dev/null +++ b/files/apts/marconi-server @@ -0,0 +1,3 @@ +python-pymongo +mongodb-server +pkg-config diff --git a/files/rpms/marconi-server b/files/rpms/marconi-server new file mode 100644 index 0000000000..d7b7ea89c1 --- /dev/null +++ b/files/rpms/marconi-server @@ -0,0 
+1,3 @@ +selinux-policy-targeted +mongodb-server +pymongo diff --git a/lib/marconi b/lib/marconi new file mode 100644 index 0000000000..8e0b82b49e --- /dev/null +++ b/lib/marconi @@ -0,0 +1,171 @@ +# lib/marconi +# Install and start **Marconi** service + +# To enable a minimal set of Marconi services, add the following to localrc: +# enable_service marconi-server +# +# Dependencies: +# - functions +# - OS_AUTH_URL for auth in api +# - DEST set to the destination directory +# - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api +# - STACK_USER service user + +# stack.sh +# --------- +# install_marconi +# configure_marconi +# init_marconi +# start_marconi +# stop_marconi +# cleanup_marconi + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories +MARCONI_DIR=$DEST/marconi +MARCONICLIENT_DIR=$DEST/python-marconiclient +MARCONI_CONF_DIR=/etc/marconi +MARCONI_CONF=$MARCONI_CONF_DIR/marconi.conf +MARCONI_API_LOG_DIR=/var/log/marconi-api +MARCONI_AUTH_CACHE_DIR=${MARCONI_AUTH_CACHE_DIR:-/var/cache/marconi} + +# Support potential entry-points console scripts +MARCONI_BIN_DIR=$(get_python_exec_prefix) + +# Set up database backend +MARCONI_BACKEND=${MARCONI_BACKEND:-mongodb} + + +# Set Marconi repository +MARCONI_REPO=${MARCONI_REPO:-${GIT_BASE}/openstack/marconi.git} +MARCONI_BRANCH=${MARCONI_BRANCH:-master} + +# Set client library repository +MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconiclient.git} +MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master} + +# Functions +# --------- + +# cleanup_marconi() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_marconi() { + mongo marconi --eval "db.dropDatabase();" +} + +# configure_marconiclient() - Set config files, create data dirs, etc +function configure_marconiclient() { + setup_develop $MARCONICLIENT_DIR +} + +# configure_marconi() - Set 
config files, create data dirs, etc +function configure_marconi() { + setup_develop $MARCONI_DIR + + [ ! -d $MARCONI_CONF_DIR ] && sudo mkdir -m 755 -p $MARCONI_CONF_DIR + sudo chown $USER $MARCONI_CONF_DIR + + [ ! -d $MARCONI_API_LOG_DIR ] && sudo mkdir -m 755 -p $MARCONI_API_LOG_DIR + sudo chown $USER $MARCONI_API_LOG_DIR + + iniset $MARCONI_CONF DEFAULT verbose True + iniset $MARCONI_CONF 'drivers:transport:wsgi' bind '0.0.0.0' + + # Install the policy file for the API server + cp $MARCONI_DIR/etc/marconi/policy.json $MARCONI_CONF_DIR + iniset $MARCONI_CONF DEFAULT policy_file $MARCONI_CONF_DIR/policy.json + + iniset $MARCONI_CONF keystone_authtoken auth_protocol http + iniset $MARCONI_CONF keystone_authtoken admin_user marconi + iniset $MARCONI_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $MARCONI_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $MARCONI_CONF keystone_authtoken signing_dir $MARCONI_AUTH_CACHE_DIR + + if [[ "$MARCONI_BACKEND" = 'mongodb' ]]; then + iniset $MARCONI_CONF database connection mongodb://localhost:27017/marconi + configure_mongodb + cleanup_marconi + fi +} + +function configure_mongodb() { + # Set nssize to 2GB. This increases the number of namespaces supported + # # per database. + sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod + + restart_service mongod +} + +# init_marconi() - Initialize etc. 
+function init_marconi() { + # Create cache dir + sudo mkdir -p $MARCONI_AUTH_CACHE_DIR + sudo chown $STACK_USER $MARCONI_AUTH_CACHE_DIR + rm -f $MARCONI_AUTH_CACHE_DIR/* +} + +# install_marconi() - Collect source and prepare +function install_marconi() { + git_clone $MARCONI_REPO $MARCONI_DIR $MARCONI_BRANCH + setup_develop $MARCONI_DIR +} + +# install_marconiclient() - Collect source and prepare +function install_marconiclient() { + git_clone $MARCONICLIENT_REPO $MARCONICLIENT_DIR $MARCONICLIENT_BRANCH + setup_develop $MARCONICLIENT_DIR +} + +# start_marconi() - Start running processes, including screen +function start_marconi() { + screen_it marconi-server "marconi-server --config-file $MARCONI_CONF" +} + +# stop_marconi() - Stop running processes +function stop_marconi() { + # Kill the marconi screen windows + for serv in marconi-server; do + screen -S $SCREEN_NAME -p $serv -X kill + done +} + +function create_marconi_accounts() { + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + MARCONI_USER=$(get_id keystone user-create --name=marconi \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=marconi@example.com) + keystone user-role-add --tenant-id $SERVICE_TENANT \ + --user-id $MARCONI_USER \ + --role-id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + MARCONI_SERVICE=$(get_id keystone service-create \ + --name=marconi \ + --type=queuing \ + --description="Marconi Service") + keystone endpoint-create \ + --region RegionOne \ + --service_id $MARCONI_SERVICE \ + --publicurl "http://$SERVICE_HOST:8888" \ + --adminurl "http://$SERVICE_HOST:8888" \ + --internalurl "http://$SERVICE_HOST:8888" + fi + +} + + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: From bc7f643bb7c7fe704cf436b9d96d878adaadf3c4 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Mon, 25 Nov 2013 10:11:14 -0800 Subject: 
[PATCH 0279/4438] Fix neutron log format with colorization Closes-Bug: #1254817 Change-Id: I6de17ef15c18e2f8ab246934461a2b7c6ae4f95f --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 70417be5d3..1c01d05e5f 100644 --- a/lib/neutron +++ b/lib/neutron @@ -556,7 +556,7 @@ function _configure_neutron_common() { # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $NEUTRON_CONF DEFAULT + setup_colorized_logging $NEUTRON_CONF DEFAULT project_id fi _neutron_setup_rootwrap From 6cc0c12dd04e7d4f5f78c492ee46df0bd6bd8c43 Mon Sep 17 00:00:00 2001 From: Sushil Kumar Date: Thu, 28 Nov 2013 07:35:11 +0000 Subject: [PATCH 0280/4438] Updates .gitignore Closes-Bug: #1255854 Change-Id: I22b6fa351dd5b654783f432c01785c706eff8397 --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index a3d5b0d02a..49eb188dd8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,6 @@ proto *~ -.*.sw[nop] +.*.sw? *.log *.log.[1-9] src From 130c90ed0b6111946e4004b125b5ae1a92772a08 Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Thu, 28 Nov 2013 16:56:51 +0900 Subject: [PATCH 0281/4438] Do not create an unnecessary file "ml2" Previously when running devstack with Neutron ML2 plugin, an unnecessary file "ml2" is created in devstack directory. It is because when the first argument is not defined the second argument becomes the first one. This change moves the first "options" argument of populate_ml2_config to the last and checks the given options has a value before adding them to a file. 
Change-Id: I9ff40456798c42216d414d5f8d443e671ab7d497 Close-Bug: #1255853 --- lib/neutron_plugins/ml2 | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 8d2e303854..b5b1873f3f 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -34,10 +34,13 @@ Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS:-} ML2_L3_PLUGIN=${ML2_L3_PLUGIN:-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin} function populate_ml2_config() { - OPTS=$1 - CONF=$2 - SECTION=$3 + CONF=$1 + SECTION=$2 + OPTS=$3 + if [ -z "$OPTS" ]; then + return + fi for I in "${OPTS[@]}"; do # Replace the first '=' with ' ' for iniset syntax iniset $CONF $SECTION ${I/=/ } @@ -102,19 +105,17 @@ function neutron_plugin_configure_service() { # Since we enable the tunnel TypeDrivers, also enable a local_ip iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP - populate_ml2_config mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS /$Q_PLUGIN_CONF_FILE ml2 + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS - populate_ml2_config type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS /$Q_PLUGIN_CONF_FILE ml2 + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS - populate_ml2_config $Q_SRV_EXTRA_OPTS /$Q_PLUGIN_CONF_FILE ml2 + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 $Q_SRV_EXTRA_OPTS - populate_ml2_config $Q_ML2_PLUGIN_GRE_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_gre + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_gre $Q_ML2_PLUGIN_GRE_TYPE_OPTIONS - populate_ml2_config $Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_vxlan + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vxlan $Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS - if [ "$Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS" != "" ]; then - populate_ml2_config $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_vlan - fi + populate_ml2_config /$Q_PLUGIN_CONF_FILE 
ml2_type_vlan $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS } function has_neutron_plugin_security_group() { From 500a32f4f8e8526ba075b61f336cf91dc9d8c652 Mon Sep 17 00:00:00 2001 From: Edgar Magana Date: Mon, 2 Dec 2013 14:27:31 -0800 Subject: [PATCH 0282/4438] Adds entries for missing parameters in PLUMgrid plugin Three configuration parameters were missing for the PLUMgrid plugin. In this patch all those three have been properly added. Change-Id: If070aa5eb35678d0984470ebcd43fd99e08bcc8a Closes-Bug: #1255808 --- lib/neutron_plugins/plumgrid | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid index d4050bb951..bccd301011 100644 --- a/lib/neutron_plugins/plumgrid +++ b/lib/neutron_plugins/plumgrid @@ -6,8 +6,6 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -#source $TOP_DIR/lib/neutron_plugins/ovs_base - function neutron_plugin_create_nova_conf() { : } @@ -23,11 +21,17 @@ function neutron_plugin_configure_common() { Q_PLUGIN_CLASS="neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2" PLUMGRID_DIRECTOR_IP=${PLUMGRID_DIRECTOR_IP:-localhost} PLUMGRID_DIRECTOR_PORT=${PLUMGRID_DIRECTOR_PORT:-7766} + PLUMGRID_ADMIN=${PLUMGRID_ADMIN:-username} + PLUMGRID_PASSWORD=${PLUMGRID_PASSWORD:-password} + PLUMGRID_TIMEOUT=${PLUMGRID_TIMEOUT:-70} } function neutron_plugin_configure_service() { iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server $PLUMGRID_DIRECTOR_IP iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server_port $PLUMGRID_DIRECTOR_PORT + iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector username $PLUMGRID_ADMIN + iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector password $PLUMGRID_PASSWORD + iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector servertimeout $PLUMGRID_TIMEOUT } function neutron_plugin_configure_debug_command() { From f9e773982a905517d78ccaf51ef00ce1860bf591 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Tue, 3 Dec 2013 06:17:16 +0100 Subject: [PATCH 
0283/4438] Use fixed network for ssh when n-net is enabled Server rebuild test has stability issues with n-net + floating ip. Change-Id: I8a921fddbca49c8499938a25f9722ea40cee76cc --- lib/tempest | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 803b740221..5a2c78d5e5 100644 --- a/lib/tempest +++ b/lib/tempest @@ -193,7 +193,9 @@ function configure_tempest() { if [ "$Q_USE_NAMESPACE" != "False" ]; then tenant_networks_reachable=false - ssh_connect_method="floating" + if ! is_service_enabled n-net; then + ssh_connect_method="floating" + fi else tenant_networks_reachable=true fi From 7858510ba7d8fa44878374ad71b14e21618adc17 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Wed, 4 Dec 2013 01:41:11 +0400 Subject: [PATCH 0284/4438] Fix savanna-dashboard git repo clone dest It breaks savanna d-g jobs due to the ERROR_ON_CLONE=True in d-g (enforce that nothing will be cloned during the installation). Change-Id: I0531e1baf7252c31eb63ee5b46c28d1dfa7d0a1b --- lib/savanna-dashboard | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard index e96762285c..7713a78637 100644 --- a/lib/savanna-dashboard +++ b/lib/savanna-dashboard @@ -29,7 +29,7 @@ SAVANNA_PYTHONCLIENT_REPO=${SAVANNA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/pyt SAVANNA_PYTHONCLIENT_BRANCH=${SAVANNA_PYTHONCLIENT_BRANCH:-master} # Set up default directories -SAVANNA_DASHBOARD_DIR=$DEST/savanna_dashboard +SAVANNA_DASHBOARD_DIR=$DEST/savanna-dashboard SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient # Functions From 90bcd2ff4d4ea11883a58521e58b67f2d981693b Mon Sep 17 00:00:00 2001 From: Arnaud Legendre Date: Fri, 22 Nov 2013 16:05:39 -0800 Subject: [PATCH 0285/4438] Attempt to retrieve the vmdk descriptor data-pair VMDK formats such as monolithicFlat and vmfs require two files to be fully consumable by the Nova drivers (a descriptor-data pair: *.vmdk and *-flat.vmdk). 
On the upload of the descriptor (*.vmdk), upload_image.sh should attempt to retrieve the *-flat.vmdk. The same way, the descriptor should be retrieved when a flat disk is uploaded. On success, the upload script will be able to use the flat disk as the image content and the relevant descriptor settings as the image metadata. Change-Id: I9214754029c46dd60b9e7d606d84d8819a498a8d Closes-Bug: #1252443 --- functions | 90 +++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 84 insertions(+), 6 deletions(-) diff --git a/functions b/functions index 6137aafd6e..4d5b4b574f 100644 --- a/functions +++ b/functions @@ -1351,10 +1351,9 @@ function upload_image() { # Create a directory for the downloaded image tarballs. mkdir -p $FILES/images - + IMAGE_FNAME=`basename "$image_url"` if [[ $image_url != file* ]]; then # Downloads the image (uec ami+aki style), then extracts it. - IMAGE_FNAME=`basename "$image_url"` if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then wget -c $image_url -O $FILES/$IMAGE_FNAME if [[ $? -ne 0 ]]; then @@ -1410,13 +1409,92 @@ function upload_image() { vmdk_create_type="$(head -25 $IMAGE | grep -a -F -m 1 'createType=' $IMAGE)" vmdk_create_type="${vmdk_create_type#*\"}" vmdk_create_type="${vmdk_create_type%?}" + + descriptor_data_pair_msg="Monolithic flat and VMFS disks "` + `"should use a descriptor-data pair." if [[ "$vmdk_create_type" = "monolithicSparse" ]]; then vmdk_disktype="sparse" - elif [[ "$vmdk_create_type" = "monolithicFlat" ]]; then - die $LINENO "Monolithic flat disks should use a descriptor-data pair." \ - "Please provide the disk and not the descriptor." 
+ elif [[ "$vmdk_create_type" = "monolithicFlat" || \ + "$vmdk_create_type" = "vmfs" ]]; then + # Attempt to retrieve the *-flat.vmdk + flat_fname="$(head -25 $IMAGE | grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE)" + flat_fname="${flat_fname#*\"}" + flat_fname="${flat_fname%?}" + if [[ -z "$flat_name" ]]; then + flat_fname="$IMAGE_NAME-flat.vmdk" + fi + path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` + flat_url="${image_url:0:$path_len}$flat_fname" + warn $LINENO "$descriptor_data_pair_msg"` + `" Attempt to retrieve the *-flat.vmdk: $flat_url" + if [[ $flat_url != file* ]]; then + if [[ ! -f $FILES/$flat_fname || \ + "$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then + wget -c $flat_url -O $FILES/$flat_fname + if [[ $? -ne 0 ]]; then + echo "Flat disk not found: $flat_url" + flat_found=false + fi + fi + if $flat_found; then + IMAGE="$FILES/${flat_fname}" + fi + else + IMAGE=$(echo $flat_url | sed "s/^file:\/\///g") + if [[ ! -f $IMAGE || "$(stat -c "%s" $IMAGE)" == "0" ]]; then + echo "Flat disk not found: $flat_url" + flat_found=false + fi + if ! $flat_found; then + IMAGE=$(echo $image_url | sed "s/^file:\/\///g") + fi + fi + if $flat_found; then + IMAGE_NAME="${flat_fname}" + fi + vmdk_disktype="preallocated" + elif [[ -z "$vmdk_create_type" ]]; then + # *-flat.vmdk provided: attempt to retrieve the descriptor (*.vmdk) + # to retrieve appropriate metadata + if [[ ${IMAGE_NAME: -5} != "-flat" ]]; then + warn $LINENO "Expected filename suffix: '-flat'."` + `" Filename provided: ${IMAGE_NAME}" + else + descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk" + path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` + flat_path="${image_url:0:$path_len}" + descriptor_url=$flat_path$descriptor_fname + warn $LINENO "$descriptor_data_pair_msg"` + `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" + if [[ $flat_path != file* ]]; then + if [[ ! 
-f $FILES/$descriptor_fname || \ + "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then + wget -c $descriptor_url -O $FILES/$descriptor_fname + if [[ $? -ne 0 ]]; then + warn $LINENO "Descriptor not found $descriptor_url" + descriptor_found=false + fi + fi + descriptor_url="$FILES/$descriptor_fname" + else + descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g") + if [[ ! -f $descriptor_url || \ + "$(stat -c "%s" $descriptor_url)" == "0" ]]; then + warn $LINENO "Descriptor not found $descriptor_url" + descriptor_found=false + fi + fi + if $descriptor_found; then + vmdk_adapter_type="$(head -25 $descriptor_url |"` + `"grep -a -F -m 1 'ddb.adapterType =' $descriptor_url)" + vmdk_adapter_type="${vmdk_adapter_type#*\"}" + vmdk_adapter_type="${vmdk_adapter_type%?}" + fi + fi + #TODO(alegendre): handle streamOptimized once supported by the VMware driver. + vmdk_disktype="preallocated" else - #TODO(alegendre): handle streamOptimized once supported by VMware driver. + #TODO(alegendre): handle streamOptimized once supported by the VMware driver. vmdk_disktype="preallocated" fi From 1bbb0ca9c6126b726ed21738b76befe40345c773 Mon Sep 17 00:00:00 2001 From: Ana Krivokapic Date: Wed, 4 Dec 2013 15:25:45 +0100 Subject: [PATCH 0286/4438] Fix noVNC git repo Recent change switched base git URL to git.openstack.org. However, noVNC is only hosted on GitHub. Change git repo URL for noVNC back to the GitHub one. 
Change-Id: Iaa9f570639301be1c29cc400c1c73afcbf637b70 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 7eda5a5671..410f9d8d05 100644 --- a/stackrc +++ b/stackrc @@ -178,7 +178,7 @@ BM_POSEUR_REPO=${BM_POSEUR_REPO:-${GIT_BASE}/tripleo/bm_poseur.git} BM_POSEUR_BRANCH=${BM_POSEUR_BRANCH:-master} # a websockets/html5 or flash powered VNC console for vm instances -NOVNC_REPO=${NOVNC_REPO:-${GIT_BASE}/kanaka/noVNC.git} +NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git} NOVNC_BRANCH=${NOVNC_BRANCH:-master} # ryu service From ab77587a371dea59055484b6f9e4ee1a434fcaf0 Mon Sep 17 00:00:00 2001 From: Rafael Folco Date: Mon, 2 Dec 2013 14:04:32 -0200 Subject: [PATCH 0287/4438] Generate glance image with SCSI bus type for ppc arch This patch fixes wrong assumptions for bus types of disk and cdrom on ppc64. Qemu driver assumes IDE bus type for cdrom device, which is not supported on ppc arch. Adds capability to add --property key-value to the glance image-create command. Using double brackets for portability reasons. 
Change-Id: I9f55fa0b6a894a93926e4f8c3d0ea410b5283f9c --- functions | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/functions b/functions index 6137aafd6e..78c676fe3e 100644 --- a/functions +++ b/functions @@ -554,7 +554,7 @@ function exit_distro_not_supported { function is_arch { ARCH_TYPE=$1 - [ "($uname -m)" = "$ARCH_TYPE" ] + [[ "$(uname -m)" == "$ARCH_TYPE" ]] } # Checks if installed Apache is <= given version @@ -1510,11 +1510,15 @@ function upload_image() { *) echo "Do not know what to do with $IMAGE_FNAME"; false;; esac + if is_arch "ppc64"; then + IMG_PROPERTY="--property hw_disk_bus=scsi --property hw_cdrom_bus=scsi" + fi + if [ "$CONTAINER_FORMAT" = "bare" ]; then if [ "$UNPACK" = "zcat" ]; then - glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}") + glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" $IMG_PROPERTY --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}") else - glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}" + glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" $IMG_PROPERTY --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}" fi else # Use glance client to add the kernel the root filesystem. @@ -1522,12 +1526,12 @@ function upload_image() { # kernel for use when uploading the root filesystem. 
KERNEL_ID=""; RAMDISK_ID=""; if [ -n "$KERNEL" ]; then - KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --is-public True --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2) + KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" $IMG_PROPERTY --is-public True --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2) fi if [ -n "$RAMDISK" ]; then - RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --is-public True --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) + RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" $IMG_PROPERTY --is-public True --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) fi - glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --is-public True --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" + glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" $IMG_PROPERTY --is-public True --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" fi } From 9a48a351fbb3cadb02cf0162d2bc66054cb6def1 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Wed, 4 Dec 2013 23:13:57 +0000 Subject: [PATCH 0288/4438] XenAPI: Update DomU to Ubuntu Saucy Driven by a race condition in LVM being hit in the tempest tests. This is a locking race between a file lock and a semaphore which is fixed in 2.02.96-5. 
Change-Id: I8a8c215c90a0602288292ffd06b7694d2db6219e --- tools/xen/xenrc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/xen/xenrc b/tools/xen/xenrc index bdcaf992b2..5796268aaa 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -57,8 +57,8 @@ PUB_IP=${PUB_IP:-172.24.4.10} PUB_NETMASK=${PUB_NETMASK:-255.255.255.0} # Ubuntu install settings -UBUNTU_INST_RELEASE="precise" -UBUNTU_INST_TEMPLATE_NAME="Ubuntu 12.04 (64-bit) for DevStack" +UBUNTU_INST_RELEASE="saucy" +UBUNTU_INST_TEMPLATE_NAME="Ubuntu 13.10 (64-bit) for DevStack" # For 12.04 use "precise" and update template name # However, for 12.04, you should be using # XenServer 6.1 and later or XCP 1.6 or later From a49422e33ec08c2e8391168dd71689674ad8fc7c Mon Sep 17 00:00:00 2001 From: sbauza Date: Thu, 5 Dec 2013 14:56:14 +0100 Subject: [PATCH 0289/4438] Fix install_get_pip in order to work behind a proxy Proxy envvars are not passed to pip when sudo, we need to export them. Change-Id: I67622f5ea8ecb948006e032bdc395ecf36914146 Closes-Bug: #1258155 --- tools/install_pip.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 6b9b25e3e9..a65a77e079 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -67,7 +67,7 @@ function install_get_pip() { curl -O $PIP_GET_PIP_URL; \ ) fi - sudo python $FILES/get-pip.py + sudo -E python $FILES/get-pip.py } function install_pip_tarball() { @@ -75,7 +75,7 @@ function install_pip_tarball() { curl -O $PIP_TAR_URL; \ tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz 1>/dev/null; \ cd pip-$INSTALL_PIP_VERSION; \ - sudo python setup.py install 1>/dev/null; \ + sudo -E python setup.py install 1>/dev/null; \ ) } From 526b79f98825963c5fbb157bca5a54750bd045af Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 22 Nov 2013 11:30:44 -0600 Subject: [PATCH 0290/4438] Eradicate last of underscores in options The --ip_range in stack.sh remains due to nova-manage needing to be fixed. 
(Rebased 05Dec2013) Change-Id: Ic0f93d41b6edfdc5deb82ae820e2c0c5a8bce24e --- exercises/boot_from_volume.sh | 6 +++--- exercises/client-args.sh | 16 +++++++--------- exercises/floating_ips.sh | 2 +- exercises/neutron-adv-test.sh | 4 ++-- exercises/volumes.sh | 4 ++-- stack.sh | 2 +- 6 files changed, 16 insertions(+), 18 deletions(-) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 3b3d3ba63b..ed8ba6310e 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -149,7 +149,7 @@ fi # Create the bootable volume start_time=$(date +%s) -cinder create --image-id $IMAGE --display_name=$VOL_NAME --display_description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ +cinder create --image-id $IMAGE --display-name=$VOL_NAME --display-description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ die $LINENO "Failure creating volume $VOL_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not created" @@ -165,10 +165,10 @@ die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME" # Boot instance # ------------- -# Boot using the --block_device_mapping param. The format of mapping is: +# Boot using the --block-device-mapping param. 
The format of mapping is: # =::: # Leaving the middle two fields blank appears to do-the-right-thing -VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security_groups=$SECGROUP --key_name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2) +VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security-groups=$SECGROUP --key-name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2) die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" # Check that the status is active within ACTIVE_TIMEOUT seconds diff --git a/exercises/client-args.sh b/exercises/client-args.sh index 1e68042cec..e79774f98c 100755 --- a/exercises/client-args.sh +++ b/exercises/client-args.sh @@ -56,10 +56,8 @@ unset OS_PASSWORD unset OS_AUTH_URL # Common authentication args -TENANT_ARG="--os_tenant_name=$x_TENANT_NAME" -TENANT_ARG_DASH="--os-tenant-name=$x_TENANT_NAME" -ARGS="--os_username=$x_USERNAME --os_password=$x_PASSWORD --os_auth_url=$x_AUTH_URL" -ARGS_DASH="--os-username=$x_USERNAME --os-password=$x_PASSWORD --os-auth-url=$x_AUTH_URL" +TENANT_ARG="--os-tenant-name=$x_TENANT_NAME" +ARGS="--os-username=$x_USERNAME --os-password=$x_PASSWORD --os-auth-url=$x_AUTH_URL" # Set global return RETURN=0 @@ -71,7 +69,7 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then STATUS_KEYSTONE="Skipped" else echo -e "\nTest Keystone" - if keystone $TENANT_ARG_DASH $ARGS_DASH catalog --service identity; then + if keystone $TENANT_ARG $ARGS catalog --service identity; then STATUS_KEYSTONE="Succeeded" else STATUS_KEYSTONE="Failed" @@ -90,7 +88,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then else # Test OSAPI echo -e "\nTest Nova" - if nova $TENANT_ARG_DASH $ARGS_DASH flavor-list; then + if nova $TENANT_ARG $ARGS flavor-list; then STATUS_NOVA="Succeeded" else STATUS_NOVA="Failed" @@ -107,7 +105,7 @@ if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then STATUS_CINDER="Skipped" else echo -e "\nTest Cinder" - if cinder $TENANT_ARG_DASH 
$ARGS_DASH list; then + if cinder $TENANT_ARG $ARGS list; then STATUS_CINDER="Succeeded" else STATUS_CINDER="Failed" @@ -124,7 +122,7 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then STATUS_GLANCE="Skipped" else echo -e "\nTest Glance" - if glance $TENANT_ARG_DASH $ARGS_DASH image-list; then + if glance $TENANT_ARG $ARGS image-list; then STATUS_GLANCE="Succeeded" else STATUS_GLANCE="Failed" @@ -141,7 +139,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; the STATUS_SWIFT="Skipped" else echo -e "\nTest Swift" - if swift $TENANT_ARG_DASH $ARGS_DASH stat; then + if swift $TENANT_ARG $ARGS stat; then STATUS_SWIFT="Succeeded" else STATUS_SWIFT="Failed" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 4d71d49163..7055278f35 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -127,7 +127,7 @@ fi # Boot instance # ------------- -VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) +VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" # Check that the status is active within ACTIVE_TIMEOUT seconds diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index 28e0a3d441..0a100c0fe8 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -238,9 +238,9 @@ function create_network { source $TOP_DIR/openrc admin admin local TENANT_ID=$(get_tenant_id $TENANT) source $TOP_DIR/openrc $TENANT $TENANT - local NET_ID=$(neutron net-create --tenant_id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) + local NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA" - neutron subnet-create --ip_version 4 --tenant_id $TENANT_ID 
--gateway $GATEWAY $NET_ID $CIDR + neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR neutron-debug probe-create --device-owner compute $NET_ID source $TOP_DIR/openrc demo demo } diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 77fa4ebc25..21b5d21c04 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -130,7 +130,7 @@ fi # Boot instance # ------------- -VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) +VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" # Check that the status is active within ACTIVE_TIMEOUT seconds @@ -156,7 +156,7 @@ fi # Create a new volume start_time=$(date +%s) -cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ +cinder create --display-name $VOL_NAME --display-description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ die $LINENO "Failure creating volume $VOL_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then die $LINENO "Volume $VOL_NAME not created" diff --git a/stack.sh b/stack.sh index af01faa01e..22d184efd9 100755 --- a/stack.sh +++ b/stack.sh @@ -1083,7 +1083,7 @@ if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nov die_if_not_set $LINENO NOVA_USER_ID "Failure retrieving NOVA_USER_ID for nova" NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1) die_if_not_set $LINENO NOVA_TENANT_ID "Failure retrieving NOVA_TENANT_ID for $SERVICE_TENANT_NAME" - CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID) + CREDS=$(keystone ec2-credentials-create --user-id $NOVA_USER_ID --tenant-id $NOVA_TENANT_ID) ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') iniset $NOVA_CONF DEFAULT s3_access_key "$ACCESS_KEY" From 0c08e7b2b978b71fbb25cea6a9949cea0081db5c Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Wed, 4 Dec 2013 18:03:25 +0400 Subject: [PATCH 0291/4438] Setup user and endpoints for Savanna * create savanna user with admin role for auth token checks * create service data_processing * create savanna endpoint * use savanna user for auth token checks It's needed for running tempest tests. 
Change-Id: Iff26960746e36012c275f43c0de0dedcaebc8b0a --- extras.d/70-savanna.sh | 1 + lib/savanna | 50 ++++++++++++++++++++++++++++++++++++------ 2 files changed, 44 insertions(+), 7 deletions(-) diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh index f6881cc4f6..6bbe113fa7 100644 --- a/extras.d/70-savanna.sh +++ b/extras.d/70-savanna.sh @@ -14,6 +14,7 @@ if is_service_enabled savanna; then elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring Savanna" configure_savanna + create_savanna_accounts if is_service_enabled horizon; then configure_savanna_dashboard fi diff --git a/lib/savanna b/lib/savanna index e9dbe72643..6794e36dfd 100644 --- a/lib/savanna +++ b/lib/savanna @@ -3,7 +3,6 @@ # Dependencies: # ``functions`` file # ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined -# ``ADMIN_{TENANT_NAME|PASSWORD}`` must be defined # ``stack.sh`` calls the entry points in this order: # @@ -28,11 +27,12 @@ SAVANNA_BRANCH=${SAVANNA_BRANCH:-master} SAVANNA_DIR=$DEST/savanna SAVANNA_CONF_DIR=${SAVANNA_CONF_DIR:-/etc/savanna} SAVANNA_CONF_FILE=savanna.conf -ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin} -ADMIN_NAME=${ADMIN_NAME:-admin} -ADMIN_PASSWORD=${ADMIN_PASSWORD:-nova} SAVANNA_DEBUG=${SAVANNA_DEBUG:-True} +SAVANNA_SERVICE_HOST=${SAVANNA_SERVICE_HOST:-$SERVICE_HOST} +SAVANNA_SERVICE_PORT=${SAVANNA_SERVICE_PORT:-8386} +SAVANNA_SERVICE_PROTOCOL=${SAVANNA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} + # Support entry points installation of console scripts if [[ -d $SAVANNA_DIR/bin ]]; then SAVANNA_BIN_DIR=$SAVANNA_DIR/bin @@ -43,6 +43,42 @@ fi # Functions # --------- +# create_savanna_accounts() - Set up common required savanna accounts +# +# Tenant User Roles +# ------------------------------ +# service savanna admin +function create_savanna_accounts() { + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + SAVANNA_USER=$(keystone 
user-create \ + --name=savanna \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=savanna@example.com \ + | grep " id " | get_field 2) + keystone user-role-add \ + --tenant-id $SERVICE_TENANT \ + --user-id $SAVANNA_USER \ + --role-id $ADMIN_ROLE + + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + SAVANNA_SERVICE=$(keystone service-create \ + --name=savanna \ + --type=data_processing \ + --description="Savanna Data Processing" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $SAVANNA_SERVICE \ + --publicurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ + --adminurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ + --internalurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" + fi +} + # configure_savanna() - Set config files, create data dirs, etc function configure_savanna() { @@ -54,9 +90,9 @@ function configure_savanna() { # Copy over savanna configuration file and configure common parameters. 
cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_password $ADMIN_PASSWORD - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_username $ADMIN_NAME - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $ADMIN_TENANT_NAME + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_username savanna + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG recreate_database savanna utf8 From 485dd811f6da244c794171362c8ae59d2e6f2d38 Mon Sep 17 00:00:00 2001 From: Geronimo Orozco Date: Fri, 29 Nov 2013 23:53:32 +0000 Subject: [PATCH 0292/4438] Adds python2 to general packages to be installed devstack works only for python2 python3 will break the install. This commit adds python2 to the general dependencies of ubuntu Change-Id: I7721ff85a63f635ff798407a5ee1d6766405c683 Closes-Bug: #1188215 --- files/apts/general | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apts/general b/files/apts/general index fcf0b5b06e..aff687fab4 100644 --- a/files/apts/general +++ b/files/apts/general @@ -20,3 +20,4 @@ tcpdump euca2ools # only for testing client tar python-cmd2 # dist:precise +python2.7 From 7103a84e3900502648e81bd4313b777f2da63f92 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Sat, 7 Dec 2013 13:53:33 -0800 Subject: [PATCH 0293/4438] Workaround Cinder "wishlist" bug 1255593 for multi-backend volume_clear Cinder currently only applies the volume_clear setting from the DEFAULT section of cinder.conf if you're using a single backend. The Cinder team has determined this is a 'wishlist' item to propagate volume_clear to each backend, but it does impact usability and performance. 
To improve the performance of running Tempest with multi-backends in the gate, workaround the bug in devstack. Related-Bug: #1255593 Change-Id: Ia0ff5422f53eeda9a3ac4336eefec3b9bdea6da2 --- lib/cinder | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/cinder b/lib/cinder index 9288685365..ef3bd81a1f 100644 --- a/lib/cinder +++ b/lib/cinder @@ -237,6 +237,11 @@ function configure_cinder() { iniset $CINDER_CONF lvmdriver-2 volume_group $VOLUME_GROUP2 iniset $CINDER_CONF lvmdriver-2 volume_driver cinder.volume.drivers.lvm.LVMISCSIDriver iniset $CINDER_CONF lvmdriver-2 volume_backend_name LVM_iSCSI_2 + # NOTE(mriedem): Work around Cinder "wishlist" bug 1255593 + if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then + iniset $CINDER_CONF lvmdriver-1 volume_clear none + iniset $CINDER_CONF lvmdriver-2 volume_clear none + fi else iniset $CINDER_CONF DEFAULT volume_group $VOLUME_GROUP iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s From ba0f1d36971fa59a5cc64d4508bc381a26964124 Mon Sep 17 00:00:00 2001 From: Rafael Folco Date: Fri, 6 Dec 2013 17:56:24 -0200 Subject: [PATCH 0294/4438] Qemu emulator requires at least 128MB of memory to boot on ppc64 The default nano and micro flavors need more memory to boot on ppc64. New flavors are 128MB and 256MB, respectively. Trailing spaces removed, again. Change-Id: Ic6740bda959754380982e67f753876dc6d984685 --- lib/tempest | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 5ee4e8a372..4400b31751 100644 --- a/lib/tempest +++ b/lib/tempest @@ -147,12 +147,21 @@ function configure_tempest() { if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then available_flavors=$(nova flavor-list) if [[ ! 
( $available_flavors =~ 'm1.nano' ) ]]; then - nova flavor-create m1.nano 42 64 0 1 + if is_arch "ppc64"; then + # qemu needs at least 128MB of memory to boot on ppc64 + nova flavor-create m1.nano 42 128 0 1 + else + nova flavor-create m1.nano 42 64 0 1 + fi fi flavor_ref=42 boto_instance_type=m1.nano if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then - nova flavor-create m1.micro 84 128 0 1 + if is_arch "ppc64"; then + nova flavor-create m1.micro 84 256 0 1 + else + nova flavor-create m1.micro 84 128 0 1 + fi fi flavor_ref_alt=84 else From bf36e8e4cf89a9de75746ce3e2ae1c98c3948993 Mon Sep 17 00:00:00 2001 From: Darragh O'Reilly Date: Mon, 9 Dec 2013 13:16:16 +0000 Subject: [PATCH 0295/4438] Make rejoin-stack.sh resume file logging if enabled This patch ensures that screen will resume logging to files after rejoin-stack.sh when SCREEN_LOGDIR is set. Change-Id: I4c3eae0df7755b700dd8acf4bf14b7e383372ca3 Closes-bug: 1192568 --- functions | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/functions b/functions index 5ff4a9b7ca..5fa265bd7d 100644 --- a/functions +++ b/functions @@ -1156,6 +1156,11 @@ function screen_rc { NL=`echo -ne '\015'` echo "screen -t $1 bash" >> $SCREENRC echo "stuff \"$2$NL\"" >> $SCREENRC + + if [[ -n ${SCREEN_LOGDIR} ]]; then + echo "logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log" >>$SCREENRC + echo "log on" >>$SCREENRC + fi fi } From 57bf097e5a10e16b7d0cf5bf6c48bc86d78a1553 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Tue, 3 Dec 2013 17:35:02 +0000 Subject: [PATCH 0296/4438] XenAPI: Increase DomU's memory Devstack has been increasing in complexity, and if we use too much of our memory it can lead to fragmentation which in turn causes a DomU error and a failure of random tests. 
Change-Id: Ide9cc84625caed4e35a64a47ee1e92a6cd567651 --- tools/xen/xenrc | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/xen/xenrc b/tools/xen/xenrc index bdcaf992b2..c0ea3bc85e 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -13,7 +13,13 @@ CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false} # Size of image VDI_MB=${VDI_MB:-5000} -OSDOMU_MEM_MB=3072 + +# Devstack now contains many components. 3GB ram is not enough to prevent +# swapping and memory fragmentation - the latter of which can cause failures +# such as blkfront failing to plug a VBD and lead to random test fails. +# +# Set to 4GB so an 8GB XenServer VM can have a 1GB Dom0 and leave 3GB for VMs +OSDOMU_MEM_MB=4096 OSDOMU_VDI_GB=8 # Network mapping. Specify bridge names or network names. Network names may From 2ac8b3f3c2ebe586802d7789cf152b13fe0d0497 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 4 Dec 2013 17:20:28 -0600 Subject: [PATCH 0297/4438] Fix a couple of INI whitespace bugs * iniset() bails if no section or option (attribute) is supplied * merge_config_file() properly skips lines with only whitespace * Also split the ini-tests into their own script Bug 1257954 Change-Id: Ie31c5bd0df8dfed129fbcf1e37228aaf25e9305d --- functions | 2 + lib/config | 2 +- tests/functions.sh | 189 ------------------------------------- tests/test_config.sh | 18 +++- tests/test_ini.sh | 220 +++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 240 insertions(+), 191 deletions(-) create mode 100755 tests/test_ini.sh diff --git a/functions b/functions index 5ff4a9b7ca..0280b2bcc4 100644 --- a/functions +++ b/functions @@ -729,6 +729,8 @@ function iniset() { local option=$3 local value=$4 + [[ -z $section || -z $option ]] && return + if ! 
grep -q "^\[$section\]" "$file" 2>/dev/null; then # Add section at the end echo -e "\n[$section]" >>"$file" diff --git a/lib/config b/lib/config index 91cefe48cc..c28072fc08 100644 --- a/lib/config +++ b/lib/config @@ -95,7 +95,7 @@ function merge_config_file() { /^ *\#/ { next } - /^.+/ { + /^[^ \t]+/ { split($0, d, " *= *") print "iniset " configfile " " section " " d[1] " \"" d[2] "\"" } diff --git a/tests/functions.sh b/tests/functions.sh index 40376aa63f..95dafe1028 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -38,195 +38,6 @@ if [[ $? = 0 ]]; then fi -echo "Testing INI functions" - -cat >test.ini <test.ini < Date: Mon, 9 Dec 2013 15:40:22 +1100 Subject: [PATCH 0298/4438] Added keystone auth port to the nova config Added the $KEYSTONE_AUTH_PORT to the keystone_authtoken section of the create_nova_conf function. This is required as without it nova doesn't communicate to the keystone server. Generating an "Unauthorised (HTTP 401)" page when acceesing /admin/. Change-Id: Ibf4d9d1c21081a1e3de4ea765f4db6de5fbdb237 --- lib/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova b/lib/nova index 5fd0bebf65..e4dae7c7df 100644 --- a/lib/nova +++ b/lib/nova @@ -398,6 +398,7 @@ function create_nova_conf() { # Add keystone authtoken configuration iniset $NOVA_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $NOVA_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $NOVA_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL iniset $NOVA_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $NOVA_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA From 86d9aed67dd16022fdd688edaf099e42ca761444 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 10 Dec 2013 12:24:16 +0000 Subject: [PATCH 0299/4438] Fix the amount of workers spawned for non proxies We were running as auto so swift would spawn a lot of processes consuming memory and CPU which are not really neeeded in a devstack env (and bad for the 
jenkins vm). Closes-Bug: 1259548 Change-Id: I6b5266186168fe99568dda5453b436c2f9cfedb3 --- lib/swift | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/swift b/lib/swift index 40722ab030..03aa8f4a7c 100644 --- a/lib/swift +++ b/lib/swift @@ -376,6 +376,9 @@ EOF iniuncomment ${swift_node_config} DEFAULT log_facility iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility} + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1 + iniuncomment ${swift_node_config} DEFAULT disable_fallocate iniset ${swift_node_config} DEFAULT disable_fallocate true From 19a47a49a98931ab311fe22ec78ffa4900013b2c Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Tue, 10 Dec 2013 07:41:26 -0800 Subject: [PATCH 0300/4438] Neutron/NVP plugin: fix 'ip link' usage Closes-bug: #1258141 Change-Id: Id26eca6c3174a108d1822440956ab7f66cc3ebd3 --- lib/neutron_thirdparty/nicira | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron_thirdparty/nicira b/lib/neutron_thirdparty/nicira index 3f2a5af11f..3efb5a93b3 100644 --- a/lib/neutron_thirdparty/nicira +++ b/lib/neutron_thirdparty/nicira @@ -33,7 +33,7 @@ function init_nicira() { echo "Defaulting to "$NVP_GATEWAY_NETWORK_CIDR fi # Make sure the interface is up, but not configured - sudo ip link dev $NVP_GATEWAY_NETWORK_INTERFACE set up + sudo ip link set $NVP_GATEWAY_NETWORK_INTERFACE up # Save and then flush the IP addresses on the interface addresses=$(ip addr show dev $NVP_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'}) sudo ip addr flush $NVP_GATEWAY_NETWORK_INTERFACE @@ -45,7 +45,7 @@ function init_nicira() { sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_INTERFACE nvp_gw_net_if_mac=$(ip link show $NVP_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}') - sudo ip link dev $PUBLIC_BRIDGE set address $nvp_gw_net_if_mac + sudo ip link 
set address $nvp_gw_net_if_mac dev $PUBLIC_BRIDGE for address in $addresses; do sudo ip addr add dev $PUBLIC_BRIDGE $address done From 81fe5f54981e5627bc876ff02753e95705d9d4a1 Mon Sep 17 00:00:00 2001 From: Tomoe Sugihara Date: Thu, 14 Nov 2013 20:04:44 +0000 Subject: [PATCH 0301/4438] Define Q_L3_ENABLED=True for MidoNet plugin Change-Id: Iabf7a5ff2e53b1822a327600da9acac8cf6a59f7 --- lib/neutron_plugins/midonet | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index e406146bbe..f95fcb75b9 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -62,6 +62,9 @@ function neutron_plugin_configure_service() { if [[ "$MIDONET_PROVIDER_ROUTER_ID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $MIDONET_PROVIDER_ROUTER_ID fi + + Q_L3_ENABLED=True + Q_L3_ROUTER_PER_TENANT=True } function neutron_plugin_setup_interface_driver() { From a515a70e2b58912877bdf2952e7812410da647f3 Mon Sep 17 00:00:00 2001 From: KIYOHIRO ADACHI Date: Wed, 11 Dec 2013 16:11:28 +0900 Subject: [PATCH 0302/4438] Fixed check method of $USE_GET_PIP '[[ -n "$USE_GET_PIP" ]]' always TRUE because $USE_GET_PIP is '0' or '1'. 
Change-Id: I73c4c6befe2126882ef21991b2a3fe712b2ac388 Closes-Bug: #1259824 --- tools/install_pip.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index a65a77e079..d714d33530 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -87,7 +87,7 @@ get_versions # Eradicate any and all system packages uninstall_package python-pip -if [[ -n "$USE_GET_PIP" ]]; then +if [[ "$USE_GET_PIP" == "1" ]]; then install_get_pip else install_pip_tarball From 0c5a04267458271fb1010cc7dad4226bec7238e7 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Wed, 4 Dec 2013 17:01:01 +1300 Subject: [PATCH 0303/4438] Allow heat tests to use already uploaded test image If $HEAT_FETCHED_TEST_IMAGE is set then tempest is configured to use the image named $HEAT_FETCHED_TEST_IMAGE for any orchestration tests which require an image. Fallback to checking $HEAT_CREATE_TEST_IMAGE and invoking diskimage-builder if necessary. The intent is to use Fedora 20 as the test image for gating since this image has heat-cfntools already installed. Change-Id: I177ae091a641ba99fd4c618e30a39c5148ae617f --- lib/tempest | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 803b740221..d2e3b0a27f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -281,7 +281,9 @@ function configure_tempest() { iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # Orchestration test image - if [[ "$HEAT_CREATE_TEST_IMAGE" = "True" ]]; then + if [[ ! 
-z "$HEAT_FETCHED_TEST_IMAGE" ]]; then + iniset $TEMPEST_CONF orchestration image_ref "$HEAT_FETCHED_TEST_IMAGE" + elif [[ "$HEAT_CREATE_TEST_IMAGE" = "True" ]]; then disk_image_create /usr/share/tripleo-image-elements "vm fedora heat-cfntools" "i386" "fedora-vm-heat-cfntools-tempest" iniset $TEMPEST_CONF orchestration image_ref "fedora-vm-heat-cfntools-tempest" fi From cd7d956fbc30eae3c1694b187ea605a5f0d960d3 Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Thu, 5 Dec 2013 08:09:12 +0000 Subject: [PATCH 0304/4438] Handle the case of pipe char in value for iniset iniset did not handle the case of "|" in the value to be injected. Fix this by replacing | with \000 (NULL). Fixes bug #1258050 Change-Id: I8882c2f3f177ebdfa0c66270dbbc7fd50f30b065 --- functions | 3 ++- tests/test_ini.sh | 20 ++++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 0280b2bcc4..995be576c7 100644 --- a/functions +++ b/functions @@ -741,8 +741,9 @@ function iniset() { $option = $value " "$file" else + local sep=$(echo -ne "\x01") # Replace it - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" "$file" + sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" fi } diff --git a/tests/test_ini.sh b/tests/test_ini.sh index b0dc6b176b..598cd578f6 100755 --- a/tests/test_ini.sh +++ b/tests/test_ini.sh @@ -136,6 +136,26 @@ else echo "iniget failed: $VAL" fi +# test pipe in option +iniset test.ini aaa handlers "a|b" + +VAL=$(iniget test.ini aaa handlers) +if [[ "$VAL" == "a|b" ]]; then + echo "OK: $VAL" +else + echo "iniget failed: $VAL" +fi + +# test space in option +iniset test.ini aaa handlers "a b" + +VAL="$(iniget test.ini aaa handlers)" +if [[ "$VAL" == "a b" ]]; then + echo "OK: $VAL" +else + echo "iniget failed: $VAL" +fi + # Test section not exist VAL=$(iniget test.ini zzz handlers) From 0718568b1203bd11058d3cd28402f84841c01dda Mon Sep 17 
00:00:00 2001 From: Thierry Carrez Date: Fri, 13 Dec 2013 15:20:26 +0100 Subject: [PATCH 0305/4438] Support oslo-rootwrap in lib/cinder Make lib/cinder support both cinder-rootwrap (current case) and oslo-rootwrap (future case) to handle the Cinder transition towards oslo-rootwrap usage peacefully. Related blueprint: https://blueprints.launchpad.net/cinder/+spec/cinder-oslo-rootwrap Change-Id: I663986304bd74cb6d72d51c553540fb5f9db1d1d --- lib/cinder | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index ef3bd81a1f..cbe732e9b0 100644 --- a/lib/cinder +++ b/lib/cinder @@ -174,6 +174,12 @@ function configure_cinder() { # Set the paths of certain binaries CINDER_ROOTWRAP=$(get_rootwrap_location cinder) + if [[ ! -x $CINDER_ROOTWRAP ]]; then + CINDER_ROOTWRAP=$(get_rootwrap_location oslo) + if [[ ! -x $CINDER_ROOTWRAP ]]; then + die $LINENO "No suitable rootwrap found." + fi + fi # If Cinder ships the new rootwrap filters files, deploy them # (owned by root) and add a parameter to $CINDER_ROOTWRAP @@ -189,11 +195,16 @@ function configure_cinder() { sudo chown -R root:root $CINDER_CONF_DIR/rootwrap.d sudo chmod 644 $CINDER_CONF_DIR/rootwrap.d/* # Set up rootwrap.conf, pointing to /etc/cinder/rootwrap.d - sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/ + if [[ -f $CINDER_DIR/etc/cinder/rootwrap.conf ]]; then + sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/ + else + # rootwrap.conf is no longer shipped in Cinder itself + echo "filters_path=" | sudo tee $CINDER_CONF_DIR/rootwrap.conf > /dev/null + fi sudo sed -e "s:^filters_path=.*$:filters_path=$CINDER_CONF_DIR/rootwrap.d:" -i $CINDER_CONF_DIR/rootwrap.conf sudo chown root:root $CINDER_CONF_DIR/rootwrap.conf sudo chmod 0644 $CINDER_CONF_DIR/rootwrap.conf - # Specify rootwrap.conf as first parameter to cinder-rootwrap + # Specify rootwrap.conf as first parameter to rootwrap CINDER_ROOTWRAP="$CINDER_ROOTWRAP 
$CINDER_CONF_DIR/rootwrap.conf" ROOTWRAP_CINDER_SUDOER_CMD="$CINDER_ROOTWRAP *" fi From 0f7ad6bba6fe451c69cdc27fadfbb8ed8fdc7b71 Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Fri, 13 Dec 2013 12:42:31 -0500 Subject: [PATCH 0306/4438] Fix Issues in Marconi integration This patch fixes a couple of issues, that prevents marconi from running on devstack. Change-Id: I47060a0334ad6f90f1402b34c83bb6ad22f723d4 Closes-Bug: #1260820 --- exercises/marconi.sh | 2 +- lib/marconi | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/exercises/marconi.sh b/exercises/marconi.sh index 1b9788dce6..9d83a99f02 100755 --- a/exercises/marconi.sh +++ b/exercises/marconi.sh @@ -35,7 +35,7 @@ source $TOP_DIR/exerciserc is_service_enabled marconi-server || exit 55 -curl http://$SERVICE_HOST:8888/v1/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Marconi API not functioning!" +curl http://$SERVICE_HOST:8888/v1/ 2>/dev/null | grep -q 'queue_name' || die $LINENO "Marconi API not functioning!" set +o xtrace echo "*********************************************************************" diff --git a/lib/marconi b/lib/marconi index 8e0b82b49e..742f866e7d 100644 --- a/lib/marconi +++ b/lib/marconi @@ -148,10 +148,11 @@ function create_marconi_accounts() { --user-id $MARCONI_USER \ --role-id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - MARCONI_SERVICE=$(get_id keystone service-create \ + MARCONI_SERVICE=$(keystone service-create \ --name=marconi \ --type=queuing \ - --description="Marconi Service") + --description="Marconi Service" \ + | grep " id " | get_field 2) keystone endpoint-create \ --region RegionOne \ --service_id $MARCONI_SERVICE \ From 055cdee2bf4582e39fa91b96de745783850f082d Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Fri, 13 Dec 2013 13:51:25 -0600 Subject: [PATCH 0307/4438] Remove duplicate debug option from keystone-all The keystone server was started with the options like keystone-all ... -d --debug ... 
The -d and --debug options are the same so one of them is redundant. This will make it less confusing if someone removes --debug and thinks that debug is off, but debug is still on because they didn't notice there was an extra -d. Change-Id: I1ac977e6b12f1cc44f02b636c1bfb5c115b5b3e4 --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 712a509fde..29b9604efe 100644 --- a/lib/keystone +++ b/lib/keystone @@ -403,7 +403,7 @@ function start_keystone() { screen_it key "cd $KEYSTONE_DIR && sudo tail -f /var/log/$APACHE_NAME/keystone" else # Start Keystone in a screen window - screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" + screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG --debug" fi echo "Waiting for keystone to start..." From 90234ac4b03426d844b72d251d4cae13fa09cde5 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Mon, 25 Nov 2013 05:44:10 -0800 Subject: [PATCH 0308/4438] Default to /24 prefix for floating IP range with neutron When running Tempest parallel tests with neutron, several long-running tests might create routers and floating IPs, which will result in IP allocations over the public network. Increasing the public network size should ensure tests do not fail due to IP address shortage; this patch also updates the public network gateway IP address. 
Related-Bug: 1253966 Change-Id: Ie075b3c4d14a07b06c42fd29b09770dd1972aa45 --- lib/neutron | 2 +- lib/neutron_thirdparty/nicira | 2 +- stack.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/neutron b/lib/neutron index 70417be5d3..786e8f8712 100644 --- a/lib/neutron +++ b/lib/neutron @@ -68,7 +68,7 @@ set +o xtrace # Gateway and subnet defaults, in case they are not customized in localrc NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} -PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.225} +PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.1} PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"} PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"} diff --git a/lib/neutron_thirdparty/nicira b/lib/neutron_thirdparty/nicira index 3f2a5af11f..5b034e0c62 100644 --- a/lib/neutron_thirdparty/nicira +++ b/lib/neutron_thirdparty/nicira @@ -20,7 +20,7 @@ set +o xtrace NVP_GATEWAY_NETWORK_INTERFACE=${NVP_GATEWAY_NETWORK_INTERFACE:-eth2} # Re-declare floating range as it's needed also in stop_nicira, which # is invoked by unstack.sh -FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28} +FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} function configure_nicira() { : diff --git a/stack.sh b/stack.sh index 47d93bd642..d0674066b5 100755 --- a/stack.sh +++ b/stack.sh @@ -260,7 +260,7 @@ safe_chown -R $STACK_USER $DATA_DIR # from either range when attempting to guess the IP to use for the host. # Note that setting FIXED_RANGE may be necessary when running DevStack # in an OpenStack cloud that uses either of these address ranges internally. 
-FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28} +FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} From 8c1b95eef119837428993d32a05f97a231f44b9e Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Mon, 16 Dec 2013 11:04:03 +1300 Subject: [PATCH 0309/4438] Add Fedora 20 to IMAGE_URLS Fedora 20 is only added to IMAGE_URLS if $HEAT_FETCHED_TEST_IMAGE contains the exact known image name This image is used for running heat tempest tests which require an image which has heat-cfntools installed. Change-Id: Ic6cdea932a5d5f3de138da96d27e407775b3e84b --- stackrc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stackrc b/stackrc index 410f9d8d05..e89e64dc65 100644 --- a/stackrc +++ b/stackrc @@ -282,6 +282,9 @@ case "$VIRT_DRIVER" in IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};; esac +if [[ "$HEAT_FETCHED_TEST_IMAGE" == "Fedora-i386-20-20131211.1-sda" ]]; then + IMAGE_URLS+=",https://dl.fedoraproject.org/pub/alt/stage/20-RC1.1/Images/i386/$HEAT_FETCHED_TEST_IMAGE.qcow2" +fi # 10Gb default volume backing file size VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-10250M} From edddb1fddf6b571286d85057abe8aa1cd21e67a6 Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Mon, 9 Dec 2013 20:21:06 +0900 Subject: [PATCH 0310/4438] Stop Neutron advanced service external processes Some of Neutron advanced services (LBaaS, VPNaaS, ....) creates external processes and they should be stopped in unstack. This commit defines neutron__stop functions for all services and implements the cleanup logics if necessary. Also cleanup_neutron removes netns used by LBaaS haproxy. 
Change-Id: Ied3a2c374ffcb6b59ecaf1027fb6e6083eded2ae --- lib/neutron | 15 ++++++++++++++- lib/neutron_plugins/services/firewall | 4 ++++ lib/neutron_plugins/services/loadbalancer | 5 +++++ lib/neutron_plugins/services/metering | 4 ++++ lib/neutron_plugins/services/vpn | 11 +++++++++++ 5 files changed, 38 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index b05b16d72e..851b2ac65e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -508,6 +508,19 @@ function stop_neutron() { pid=$(ps aux | awk '/neutron-ns-metadata-proxy/ { print $2 }') [ ! -z "$pid" ] && sudo kill -9 $pid fi + + if is_service_enabled q-lbaas; then + neutron_lbaas_stop + fi + if is_service_enabled q-fwaas; then + neutron_fwaas_stop + fi + if is_service_enabled q-vpn; then + neutron_vpn_stop + fi + if is_service_enabled q-metering; then + neutron_metering_stop + fi } # cleanup_neutron() - Remove residual data files, anything left over from previous @@ -518,7 +531,7 @@ function cleanup_neutron() { fi # delete all namespaces created by neutron - for ns in $(sudo ip netns list | grep -o -e qdhcp-[0-9a-f\-]* -e qrouter-[0-9a-f\-]*); do + for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas)-[0-9a-f-]*'); do sudo ip netns delete ${ns} done } diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall index 1597e8577d..580071ff71 100644 --- a/lib/neutron_plugins/services/firewall +++ b/lib/neutron_plugins/services/firewall @@ -23,5 +23,9 @@ function neutron_fwaas_configure_driver() { iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas driver "neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver" } +function neutron_fwaas_stop() { + : +} + # Restore xtrace $MY_XTRACE diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer index c38f904b69..2699a9b698 100644 --- a/lib/neutron_plugins/services/loadbalancer +++ b/lib/neutron_plugins/services/loadbalancer @@ -45,5 +45,10 @@ function 
neutron_agent_lbaas_configure_agent() { fi } +function neutron_lbaas_stop() { + pids=$(ps aux | awk '/haproxy/ { print $2 }') + [ ! -z "$pids" ] && sudo kill $pids +} + # Restore xtrace $MY_XTRACE diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering index 629f3b788a..b105429bfd 100644 --- a/lib/neutron_plugins/services/metering +++ b/lib/neutron_plugins/services/metering @@ -26,5 +26,9 @@ function neutron_agent_metering_configure_agent() { cp $NEUTRON_DIR/etc/metering_agent.ini $METERING_AGENT_CONF_FILENAME } +function neutron_metering_stop() { + : +} + # Restore xtrace $MY_XTRACE diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn index b8f5c7d56b..55d0a76363 100644 --- a/lib/neutron_plugins/services/vpn +++ b/lib/neutron_plugins/services/vpn @@ -22,5 +22,16 @@ function neutron_vpn_configure_common() { fi } +function neutron_vpn_stop() { + local ipsec_data_dir=$DATA_DIR/neutron/ipsec + local pids + if [ -d $ipsec_data_dir ]; then + pids=$(find $ipsec_data_dir -name 'pluto.pid' -exec cat {} \;) + fi + if [ -n "$pids" ]; then + sudo kill $pids + fi +} + # Restore xtrace $MY_XTRACE From 1692bda49264e35757c0f2f8d9264681256657b6 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Fri, 13 Dec 2013 10:15:34 -0800 Subject: [PATCH 0311/4438] Add ability to configure tempest tests based on network extensions This patch introduces a Devstack variable to specify which network extensions are enabled; this is useful for configuring tempest runs when third-party plugins that do not support certain extensions. 
Closes-bug: #1247778 Closes-bug: #1231152 Change-Id: Iee170993cb164502774f9ac4201b963d9a2715ba --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index ec1fc90b76..0af93103a4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -299,6 +299,9 @@ function configure_tempest() { # cli iniset $TEMPEST_CONF cli cli_dir $NOVA_BIN_DIR + # Networking + iniset $TEMPEST_CONF network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}" + # service_available for service in nova cinder glance neutron swift heat horizon ceilometer; do if is_service_enabled $service ; then From db54311552d6c1efad7d9958a539848b3aeea775 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 16 Dec 2013 16:35:44 -0500 Subject: [PATCH 0312/4438] add libxslt1-dev to tempest required files tempest actually needs a pretty new lxml, which means it's going to build it from source. To do that it needs libxslt1-dev to compile. We should be good and let devstack do this out of the box so it works on minimal environments. Change-Id: Ia527905c1c15fb8c6793f0ce543ad05e25a88179 --- files/apts/tempest | 1 + files/rpms/tempest | 1 + 2 files changed, 2 insertions(+) create mode 100644 files/apts/tempest create mode 100644 files/rpms/tempest diff --git a/files/apts/tempest b/files/apts/tempest new file mode 100644 index 0000000000..f244e4e783 --- /dev/null +++ b/files/apts/tempest @@ -0,0 +1 @@ +libxslt1-dev \ No newline at end of file diff --git a/files/rpms/tempest b/files/rpms/tempest new file mode 100644 index 0000000000..de32b81504 --- /dev/null +++ b/files/rpms/tempest @@ -0,0 +1 @@ +libxslt-dev \ No newline at end of file From be2ff9a745c46bd6627b68d9b6fbdbb724f3854d Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 17 Dec 2013 16:26:21 +1100 Subject: [PATCH 0313/4438] Match RHEL Beta release strings RHEL7 (in beta) has "Beta" after the release number and before the code-name. 
Add a number-match to the regex so everything between that match and the codename in parenthesis is considered the release. Change-Id: I992f20eedcefc6aeed6bd3ad57fc4cf20c8ef15d --- functions | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 7e081907aa..e79e1d58af 100644 --- a/functions +++ b/functions @@ -422,6 +422,7 @@ GetOSVersion() { os_CODENAME=$(lsb_release -c -s) elif [[ -r /etc/redhat-release ]]; then # Red Hat Enterprise Linux Server release 5.5 (Tikanga) + # Red Hat Enterprise Linux Server release 7.0 Beta (Maipo) # CentOS release 5.5 (Final) # CentOS Linux release 6.0 (Final) # Fedora release 16 (Verne) @@ -430,7 +431,7 @@ GetOSVersion() { for r in "Red Hat" CentOS Fedora XenServer; do os_VENDOR=$r if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then - ver=`sed -e 's/^.* \(.*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release` + ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release` os_CODENAME=${ver#*|} os_RELEASE=${ver%|*} os_UPDATE=${os_RELEASE##*.} From cff1c7d3700278263da1a6f0daa381e97df15f28 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Wed, 18 Dec 2013 13:20:40 +0000 Subject: [PATCH 0314/4438] HEAT_FETCHED_TEST_IMAGE may not be defined in some cases. 
Some scripts that source stackrc use set -u, which will error when unknown variables are accessed Change-Id: I5af2d58475e1793dfa728b7ce9180fcbba1145e9 --- stackrc | 1 + 1 file changed, 1 insertion(+) diff --git a/stackrc b/stackrc index e89e64dc65..b129197e8f 100644 --- a/stackrc +++ b/stackrc @@ -282,6 +282,7 @@ case "$VIRT_DRIVER" in IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};; esac +HEAT_FETCHED_TEST_IMAGE=${HEAT_FETCHED_TEST_IMAGE:-""} if [[ "$HEAT_FETCHED_TEST_IMAGE" == "Fedora-i386-20-20131211.1-sda" ]]; then IMAGE_URLS+=",https://dl.fedoraproject.org/pub/alt/stage/20-RC1.1/Images/i386/$HEAT_FETCHED_TEST_IMAGE.qcow2" fi From 9e136b4adee6ce33fdbf01e0a8614c186c5f20b7 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 16 Dec 2013 15:52:03 +0900 Subject: [PATCH 0315/4438] Neutron: Define a utility function to add neutron service class When enabling neutron service (i.e. enable_service q-xxx), related code unconditionally adds a necessary plugin class to Q_SERVICE_PLUGIN_CLASSES. Which may cause duplication in Q_SERVICE_PLUGIN_CLASSES when Q_SERVICE_PLUGIN_CLASSES is explicitly specified in localrc. As a result, neutron server fails to start. This patch introduces a utility function to add service class, and check duplication. 
Closes-Bug: #1261291 Change-Id: Id2880c7647babfccc3e8d9fc60dd93c4b3997ed9 --- lib/neutron | 10 ++++++++++ lib/neutron_plugins/ml2 | 6 +----- lib/neutron_plugins/services/firewall | 6 +----- lib/neutron_plugins/services/loadbalancer | 6 +----- lib/neutron_plugins/services/metering | 6 +----- lib/neutron_plugins/services/vpn | 6 +----- 6 files changed, 15 insertions(+), 25 deletions(-) diff --git a/lib/neutron b/lib/neutron index b05b16d72e..38081653e4 100644 --- a/lib/neutron +++ b/lib/neutron @@ -744,6 +744,16 @@ function _configure_neutron_service() { # Utility Functions #------------------ +# _neutron_service_plugin_class_add() - add service plugin class +function _neutron_service_plugin_class_add() { + local service_plugin_class=$1 + if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then + Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class + elif [[ ! ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then + Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class" + fi +} + # _neutron_setup_rootwrap() - configure Neutron's rootwrap function _neutron_setup_rootwrap() { if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index b5b1873f3f..ab4e3474a6 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -54,11 +54,7 @@ function neutron_plugin_configure_common() { Q_PLUGIN_CLASS="neutron.plugins.ml2.plugin.Ml2Plugin" # The ML2 plugin delegates L3 routing/NAT functionality to # the L3 service plugin which must therefore be specified. 
- if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES=$ML2_L3_PLUGIN - else - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$ML2_L3_PLUGIN" - fi + _neutron_service_plugin_class_add $ML2_L3_PLUGIN } function neutron_plugin_configure_service() { diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall index 1597e8577d..97cc5a28fd 100644 --- a/lib/neutron_plugins/services/firewall +++ b/lib/neutron_plugins/services/firewall @@ -8,11 +8,7 @@ set +o xtrace FWAAS_PLUGIN=neutron.services.firewall.fwaas_plugin.FirewallPlugin function neutron_fwaas_configure_common() { - if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES=$FWAAS_PLUGIN - else - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$FWAAS_PLUGIN" - fi + _neutron_service_plugin_class_add $FWAAS_PLUGIN } function neutron_fwaas_configure_driver() { diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer index c38f904b69..6ff991c855 100644 --- a/lib/neutron_plugins/services/loadbalancer +++ b/lib/neutron_plugins/services/loadbalancer @@ -19,11 +19,7 @@ function neutron_agent_lbaas_install_agent_packages() { } function neutron_agent_lbaas_configure_common() { - if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES=$LBAAS_PLUGIN - else - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$LBAAS_PLUGIN" - fi + _neutron_service_plugin_class_add $LBAAS_PLUGIN } function neutron_agent_lbaas_configure_agent() { diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering index 629f3b788a..5cabfbfc3b 100644 --- a/lib/neutron_plugins/services/metering +++ b/lib/neutron_plugins/services/metering @@ -10,11 +10,7 @@ AGENT_METERING_BINARY="$NEUTRON_BIN_DIR/neutron-metering-agent" METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin" function neutron_agent_metering_configure_common() { - if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; 
then - Q_SERVICE_PLUGIN_CLASSES=$METERING_PLUGIN - else - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$METERING_PLUGIN" - fi + _neutron_service_plugin_class_add $METERING_PLUGIN } function neutron_agent_metering_configure_agent() { diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn index b8f5c7d56b..1ab07cb93c 100644 --- a/lib/neutron_plugins/services/vpn +++ b/lib/neutron_plugins/services/vpn @@ -15,11 +15,7 @@ function neutron_vpn_install_agent_packages() { } function neutron_vpn_configure_common() { - if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES=$VPN_PLUGIN - else - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$VPN_PLUGIN" - fi + _neutron_service_plugin_class_add $VPN_PLUGIN } # Restore xtrace From af72b68ab0bb69178084d27374a3ec96ced40e98 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Fri, 20 Dec 2013 08:39:12 +1300 Subject: [PATCH 0316/4438] Use final release of Fedora 20 for heat test image The image file has not changed, but the URL now points to the release directory for Fedora 20. 
Change-Id: Ie1a9bcc7da634996b25ef7f6fc694398c632549d --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index e89e64dc65..2c770bc7a7 100644 --- a/stackrc +++ b/stackrc @@ -283,7 +283,7 @@ case "$VIRT_DRIVER" in esac if [[ "$HEAT_FETCHED_TEST_IMAGE" == "Fedora-i386-20-20131211.1-sda" ]]; then - IMAGE_URLS+=",https://dl.fedoraproject.org/pub/alt/stage/20-RC1.1/Images/i386/$HEAT_FETCHED_TEST_IMAGE.qcow2" + IMAGE_URLS+=",https://dl.fedoraproject.org/pub/fedora/linux/releases/20/Images/i386/$HEAT_FETCHED_TEST_IMAGE.qcow2" fi # 10Gb default volume backing file size From bff001456cc5a804f752722d1c406bbb880dd542 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 20 Dec 2013 11:55:08 +0900 Subject: [PATCH 0317/4438] config: get_meta_section() misunderstands the beginning of metasection For example, the line, "if [[ -n $no_proxy ]]; then" is misparsed as the beginning of metasection because get_meta_section() misses escaping of "|" unlike get_meta_section_files(). This patch adds necessary escape as "|" -> "\|". Change-Id: Ic14b2ac167037c4f5db89492f0e8a4c5b13c7b6d Closes-Bug: #1262960 --- lib/config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/config b/lib/config index c28072fc08..1678aec3fc 100644 --- a/lib/config +++ b/lib/config @@ -35,7 +35,7 @@ function get_meta_section() { $CONFIG_AWK_CMD -v matchgroup=$matchgroup -v configfile=$configfile ' BEGIN { group = "" } - /^\[\[.+|.*\]\]/ { + /^\[\[.+\|.*\]\]/ { if (group == "") { gsub("[][]", "", $1); split($1, a, "|"); From 74ba66dd3f3d54d6a2bec925e8c6573810976b5b Mon Sep 17 00:00:00 2001 From: Yuiko Takada Date: Fri, 20 Dec 2013 08:25:10 +0000 Subject: [PATCH 0318/4438] Fix the option of a2ensite command devstack fails with trema when execute "sudo a2ensite sliceable_switch" command, without ".conf" filename extension with Apache2.4, Apache2.22. With Apache 2.2, it successes. 
Because in the versions which newer than version 2.2, file checking of a2ensite command is more severe. So, a2ensite command forbid "sliceable_switch" without "/conf". Added ".conf" filename extension. Change-Id: I29a03cb59ee493345b7df0f1a9189eb3516c86e2 Closes-Bug: #1263017 --- lib/neutron_thirdparty/trema | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema index 9efd3f6c39..bdc23568fb 100644 --- a/lib/neutron_thirdparty/trema +++ b/lib/neutron_thirdparty/trema @@ -62,7 +62,7 @@ function init_trema() { sudo sed -i -e "s|/home/sliceable_switch/script|$TREMA_SS_SCRIPT_DIR|" \ $TREMA_SS_APACHE_CONFIG sudo a2enmod rewrite actions - sudo a2ensite sliceable_switch + sudo a2ensite sliceable_switch.conf cp $TREMA_SS_DIR/sliceable_switch_null.conf $TREMA_SS_CONFIG sed -i -e "s|^\$apps_dir.*$|\$apps_dir = \"$TREMA_DIR/apps\"|" \ From 355fc866833e0bd83796da1c45e4f94b58d5f500 Mon Sep 17 00:00:00 2001 From: Flavio Percoco Date: Fri, 29 Nov 2013 14:27:35 +0100 Subject: [PATCH 0319/4438] Explicily enable the stores used by devstack Devstack currently relies on the default value of the `known_stores` configuration option. This patch enables explicitly the default stores used by devstack. The real fix for the issue below will land in Glance. However, since the default stores will be FS and HTTP we need devstack to enable Swift's as well, which is required in the gates, hence this patch. 
Partially-fixes: #1255556 Change-Id: Id9aab356b36b2150312324a0349d120bbbbd4e63 --- lib/glance | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/glance b/lib/glance index 2e29a8f77c..b278796d21 100644 --- a/lib/glance +++ b/lib/glance @@ -124,6 +124,8 @@ function configure_glance() { iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True + + iniset_multiline DEFAULT known_stores glance.store.filesystem.Store glance.store.http.Store glance.store.swift.Store fi cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI From 63d9f3e550e0918ae59ed76bd5cf0fe6ef15353b Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Sat, 21 Dec 2013 01:19:09 -0800 Subject: [PATCH 0320/4438] Fix bad copy and paste in lib/swift Change-Id: I3b7526b52867525b1d7aa634aa8163c520a92f97 --- lib/swift | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index 3bf2b7804f..96929db557 100644 --- a/lib/swift +++ b/lib/swift @@ -378,8 +378,8 @@ EOF iniuncomment ${swift_node_config} DEFAULT log_facility iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility} - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1 + iniuncomment ${swift_node_config} DEFAULT workers + iniset ${swift_node_config} DEFAULT workers 1 iniuncomment ${swift_node_config} DEFAULT disable_fallocate iniset ${swift_node_config} DEFAULT disable_fallocate true From 16312738d1a8302537e76e1e6cdeac85d63b64aa Mon Sep 17 00:00:00 2001 From: Jianing Yang Date: Sun, 22 Dec 2013 10:47:39 +0800 Subject: [PATCH 0321/4438] Correct glance db_sync command Closes-Bug: #1263431 Change-Id: I30a53adfdd8e00a9995595af2e090190bac241a0 --- lib/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index 
b278796d21..135136db7e 100644 --- a/lib/glance +++ b/lib/glance @@ -176,7 +176,7 @@ function init_glance() { recreate_database glance utf8 # Migrate glance database - $GLANCE_BIN_DIR/glance-manage db sync + $GLANCE_BIN_DIR/glance-manage db_sync create_glance_cache_dir } From 6fbb28d021d168271bb2a0643059e8c65c8ce74b Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Sun, 22 Dec 2013 07:59:37 -0800 Subject: [PATCH 0322/4438] Neutron: create network resources when agents are started Creating network resources before the agents start with the ml2 plugin might result in bnding failures for some resources such as DHCP ports because the resources are created before the agents report to the server. This patch should ensure all agents have started and reported their state to the server before creating network resources. Change-Id: Ifafb73bd3c5409a555a573ad9a94b96d79061c38 Related-Bug: #1253896 --- stack.sh | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index ce5fbd47e5..d54a652928 100755 --- a/stack.sh +++ b/stack.sh @@ -1104,10 +1104,7 @@ fi if is_service_enabled q-svc; then echo_summary "Starting Neutron" - start_neutron_service_and_check - create_neutron_initial_network - setup_neutron_debug elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then NM_CONF=${NOVA_CONF} if is_service_enabled n-cell; then @@ -1127,6 +1124,12 @@ fi if is_service_enabled neutron; then start_neutron_agents fi +# Once neutron agents are started setup initial network elements +if is_service_enabled q-svc; then + echo_summary "Creating initial neutron network elements" + create_neutron_initial_network + setup_neutron_debug +fi if is_service_enabled nova; then echo_summary "Starting Nova" start_nova From 60fcfb5c91063bb71252b7077a363092d8bebe2b Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Mon, 23 Dec 2013 17:23:47 +0000 Subject: [PATCH 0323/4438] XenAPI: Fix bug with Xen ext4-using guests Ubuntu saucy is using ext4, which 
means it hits a barrier bug with certain versions of Xen, leading to a read only filesystem. This is bug https://bugs.launchpad.net/ubuntu/+source/linux/+bug/824089 Change-Id: I9a72b203d473dc555324d44ad7c240c80dccda15 --- tools/xen/prepare_guest_template.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh index 6ea6f6321d..546ac99cd9 100755 --- a/tools/xen/prepare_guest_template.sh +++ b/tools/xen/prepare_guest_template.sh @@ -79,3 +79,7 @@ bash /opt/stack/prepare_guest.sh \\ "$GUEST_PASSWORD" "$XS_TOOLS_PATH" "$STACK_USER" \\ > /opt/stack/prepare_guest.log 2>&1 EOF + +# Need to set barrier=0 to avoid a Xen bug +# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/824089 +sed -i -e 's/errors=/barrier=0,errors=/' $STAGING_DIR/etc/fstab From e4b85590037974b04487be5b4e23166a8a35d9dc Mon Sep 17 00:00:00 2001 From: Ben Nemec Date: Fri, 20 Dec 2013 19:51:04 +0000 Subject: [PATCH 0324/4438] Set default_network in tempest.conf This is to support testing of change Ia78582cac3790653c2281a5b63d953cd46d5c290 in Tempest. 
Change-Id: Ibb812e2598fb11b7eef21a0868ee9baeea73186c --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 0969b2df1f..95b300ce77 100644 --- a/lib/tempest +++ b/lib/tempest @@ -283,6 +283,7 @@ function configure_tempest() { iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable" iniset $TEMPEST_CONF network public_network_id "$public_network_id" iniset $TEMPEST_CONF network public_router_id "$public_router_id" + iniset $TEMPEST_CONF network default_network "$FIXED_RANGE" # boto iniset $TEMPEST_CONF boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" From 3bd85c9d6e257fc952cb3c6d0c09e199685bd5ed Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Wed, 25 Dec 2013 22:14:11 +0900 Subject: [PATCH 0325/4438] Change the libvirtd log level to DEBUG Gate tests fail sometimes due to libvirt problems, but it is difficult to investigate their reasons or workarounds because there is not any log about libvirt. This patch changes the log level of libvirtd to DEBUG for investigating libvirt problems. Change-Id: Ib6559ff978fa813d0332f2339d241dd3437196ee Related-Bug: #1254872 --- lib/nova_plugins/hypervisor-libvirt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 6f90f4ac17..ef40e7ab4c 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -93,6 +93,9 @@ EOF" fi fi + # Change the libvirtd log level to DEBUG. + sudo sed -i s/"#log_level = 3"/"log_level = 1"/ /etc/libvirt/libvirtd.conf + # The user that nova runs as needs to be member of **libvirtd** group otherwise # nova-compute will be unable to use libvirt. if ! 
getent group $LIBVIRT_GROUP >/dev/null; then From 9aadec380605e4b2aab0fb159c4186618a284853 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Fri, 27 Dec 2013 19:08:26 +0900 Subject: [PATCH 0326/4438] Remove some keystone resource parsers Current "keystone" command can parse the specified resources(tenant, user, role, service) by itself. Then it is unnecessary to translate resource names to resource ids in devstack. This patch removes these resource parsers from devstack for cleanup. Change-Id: Ibae06581b471f02168b559b4ca0c10f14996d661 --- files/keystone_data.sh | 113 +++++++++++++++++++---------------------- 1 file changed, 51 insertions(+), 62 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index ea2d52d114..07b6b601d2 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -28,16 +28,6 @@ export SERVICE_TOKEN=$SERVICE_TOKEN export SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} -function get_id () { - echo `"$@" | awk '/ id / { print $4 }'` -} - -# Lookups -SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") -ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") -MEMBER_ROLE=$(keystone role-list | awk "/ Member / { print \$2 }") - - # Roles # ----- @@ -45,53 +35,52 @@ MEMBER_ROLE=$(keystone role-list | awk "/ Member / { print \$2 }") # The admin role in swift allows a user to act as an admin for their tenant, # but ResellerAdmin is needed for a user to act as any tenant. 
The name of this # role is also configurable in swift-proxy.conf -RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) +keystone role-create --name=ResellerAdmin # Service role, so service users do not have to be admins -SERVICE_ROLE=$(get_id keystone role-create --name=service) +keystone role-create --name=service # Services # -------- if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then - NOVA_USER=$(keystone user-list | awk "/ nova / { print \$2 }") # Nova needs ResellerAdmin role to download images when accessing # swift through the s3 api. keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $NOVA_USER \ - --role-id $RESELLER_ROLE + --tenant $SERVICE_TENANT_NAME \ + --user nova \ + --role ResellerAdmin fi # Heat if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then - HEAT_USER=$(get_id keystone user-create --name=heat \ + keystone user-create --name=heat \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=heat@example.com) - keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $HEAT_USER \ - --role-id $SERVICE_ROLE + --tenant $SERVICE_TENANT_NAME \ + --email=heat@example.com + keystone user-role-add --tenant $SERVICE_TENANT_NAME \ + --user heat \ + --role service # heat_stack_user role is for users created by Heat keystone role-create --name heat_stack_user if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - HEAT_CFN_SERVICE=$(get_id keystone service-create \ + keystone service-create \ --name=heat-cfn \ --type=cloudformation \ - --description="Heat CloudFormation Service") + --description="Heat CloudFormation Service" keystone endpoint-create \ --region RegionOne \ - --service_id $HEAT_CFN_SERVICE \ + --service heat-cfn \ --publicurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \ --adminurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \ --internalurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" - HEAT_SERVICE=$(get_id keystone 
service-create \ + keystone service-create \ --name=heat \ --type=orchestration \ - --description="Heat Service") + --description="Heat Service" keystone endpoint-create \ --region RegionOne \ - --service_id $HEAT_SERVICE \ + --service heat \ --publicurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ --adminurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ --internalurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" @@ -100,23 +89,23 @@ fi # Glance if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then - GLANCE_USER=$(get_id keystone user-create \ + keystone user-create \ --name=glance \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=glance@example.com) + --tenant $SERVICE_TENANT_NAME \ + --email=glance@example.com keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $GLANCE_USER \ - --role-id $ADMIN_ROLE + --tenant $SERVICE_TENANT_NAME \ + --user glance \ + --role admin if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - GLANCE_SERVICE=$(get_id keystone service-create \ + keystone service-create \ --name=glance \ --type=image \ - --description="Glance Image Service") + --description="Glance Image Service" keystone endpoint-create \ --region RegionOne \ - --service_id $GLANCE_SERVICE \ + --service glance \ --publicurl "http://$SERVICE_HOST:9292" \ --adminurl "http://$SERVICE_HOST:9292" \ --internalurl "http://$SERVICE_HOST:9292" @@ -125,25 +114,25 @@ fi # Ceilometer if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then - CEILOMETER_USER=$(get_id keystone user-create --name=ceilometer \ + keystone user-create --name=ceilometer \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=ceilometer@example.com) - keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $CEILOMETER_USER \ - --role-id $ADMIN_ROLE + --tenant $SERVICE_TENANT_NAME \ + --email=ceilometer@example.com + keystone user-role-add --tenant $SERVICE_TENANT_NAME \ + --user ceilometer \ + --role admin # Ceilometer needs 
ResellerAdmin role to access swift account stats. - keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $CEILOMETER_USER \ - --role-id $RESELLER_ROLE + keystone user-role-add --tenant $SERVICE_TENANT_NAME \ + --user ceilometer \ + --role ResellerAdmin if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - CEILOMETER_SERVICE=$(get_id keystone service-create \ + keystone service-create \ --name=ceilometer \ --type=metering \ - --description="Ceilometer Service") + --description="Ceilometer Service" keystone endpoint-create \ --region RegionOne \ - --service_id $CEILOMETER_SERVICE \ + --service ceilometer \ --publicurl "http://$SERVICE_HOST:8777" \ --adminurl "http://$SERVICE_HOST:8777" \ --internalurl "http://$SERVICE_HOST:8777" @@ -153,13 +142,13 @@ fi # EC2 if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - EC2_SERVICE=$(get_id keystone service-create \ + keystone service-create \ --name=ec2 \ --type=ec2 \ - --description="EC2 Compatibility Layer") + --description="EC2 Compatibility Layer" keystone endpoint-create \ --region RegionOne \ - --service_id $EC2_SERVICE \ + --service ec2 \ --publicurl "http://$SERVICE_HOST:8773/services/Cloud" \ --adminurl "http://$SERVICE_HOST:8773/services/Admin" \ --internalurl "http://$SERVICE_HOST:8773/services/Cloud" @@ -169,13 +158,13 @@ fi # S3 if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift3" ]]; then if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - S3_SERVICE=$(get_id keystone service-create \ + keystone service-create \ --name=s3 \ --type=s3 \ - --description="S3") + --description="S3" keystone endpoint-create \ --region RegionOne \ - --service_id $S3_SERVICE \ + --service s3 \ --publicurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ --adminurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ --internalurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" @@ -185,14 +174,14 @@ fi if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then # Tempest has some tests that 
validate various authorization checks # between two regular users in separate tenants - ALT_DEMO_TENANT=$(get_id keystone tenant-create \ - --name=alt_demo) - ALT_DEMO_USER=$(get_id keystone user-create \ + keystone tenant-create \ + --name=alt_demo + keystone user-create \ --name=alt_demo \ --pass="$ADMIN_PASSWORD" \ - --email=alt_demo@example.com) + --email=alt_demo@example.com keystone user-role-add \ - --tenant-id $ALT_DEMO_TENANT \ - --user-id $ALT_DEMO_USER \ - --role-id $MEMBER_ROLE + --tenant alt_demo \ + --user alt_demo \ + --role Member fi From 0f9a1b058423b293935b414b2035713d8ead3e71 Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Sat, 28 Dec 2013 03:42:07 +0000 Subject: [PATCH 0327/4438] Migrating trove to entry points partially implements blueprint entrypoints-for-binscripts Change-Id: Iaafde0ab7f27598d566fc008fba7eddc582139c9 --- lib/trove | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/lib/trove b/lib/trove index 6d5a56e456..f8e3eddfe2 100644 --- a/lib/trove +++ b/lib/trove @@ -30,7 +30,13 @@ TROVECLIENT_DIR=$DEST/python-troveclient TROVE_CONF_DIR=/etc/trove TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove} -TROVE_BIN_DIR=/usr/local/bin + +# Support entry points installation of console scripts +if [[ -d $TROVE_DIR/bin ]]; then + TROVE_BIN_DIR=$TROVE_DIR/bin +else + TROVE_BIN_DIR=$(get_python_exec_prefix) +fi # setup_trove_logging() - Adds logging configuration to conf files function setup_trove_logging() { @@ -178,14 +184,14 @@ function init_trove() { recreate_database trove utf8 #Initialize the trove database - $TROVE_DIR/bin/trove-manage db_sync + $TROVE_BIN_DIR/trove-manage db_sync } # start_trove() - Start running processes, including screen function start_trove() { - screen_it tr-api "cd $TROVE_DIR; bin/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1" - screen_it tr-tmgr "cd $TROVE_DIR; bin/trove-taskmanager 
--config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1" - screen_it tr-cond "cd $TROVE_DIR; bin/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug 2>&1" + screen_it tr-api "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1" + screen_it tr-tmgr "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1" + screen_it tr-cond "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug 2>&1" } # stop_trove() - Stop running processes From 3ee52c81a12f1b823c1bc22e39d9f09a8d8b2ca8 Mon Sep 17 00:00:00 2001 From: Ben Nemec Date: Thu, 12 Dec 2013 19:26:12 +0000 Subject: [PATCH 0328/4438] Ensure hostname resolves correctly rabbitmq hangs on startup if the unqualified hostname for the system doesn't resolve properly. This change ensures that the hostname is added to /etc/hosts so that will never happen with devstack. Change-Id: I2c250f38f9feb18d1a59f3a457c6d01c1d98499c --- stack.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/stack.sh b/stack.sh index ce5fbd47e5..3ee6b1cde2 100755 --- a/stack.sh +++ b/stack.sh @@ -234,6 +234,13 @@ safe_chmod 0755 $DEST # a basic test for $DEST path permissions (fatal on error unless skipped) check_path_perm_sanity ${DEST} +# Certain services such as rabbitmq require that the local hostname resolves +# correctly. Make sure it exists in /etc/hosts so that is always true. +LOCAL_HOSTNAME=`hostname -s` +if [ -z "`grep ^127.0.0.1 /etc/hosts | grep $LOCAL_HOSTNAME`" ]; then + sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts +fi + # Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without # Internet access. ``stack.sh`` must have been previously run with Internet # access to install prerequisites and fetch repositories. 
From 00b434182e3c04976e03b94490359fa26e71ef69 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 2 Jan 2014 10:33:21 +0000 Subject: [PATCH 0329/4438] Handle more nicely when role root is already here When using postgresql we were handling the fallback if the role root was already here but this was still printing an error message, try to make it a bit smarter. Closes-Bug: #1265477 Change-Id: Ib3768dd182ab968e81038f900550f641b9a2af5c --- lib/databases/postgresql | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 519479ad68..60e5a33715 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -64,9 +64,13 @@ function configure_database_postgresql { sudo sed -i "/^host/s/all\s\+::1\/128\s\+ident/$DATABASE_USER\t::0\/0\tpassword/" $PG_HBA restart_service postgresql - # If creating the role fails, chances are it already existed. Try to alter it. - sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" || \ - sudo -u root sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" + # Create the role if it's not here or else alter it. + root_roles=$(sudo -u root sudo -u postgres -i psql -t -c "SELECT 'HERE' from pg_roles where rolname='root'") + if [[ ${root_roles} == *HERE ]];then + sudo -u root sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" + else + sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" + fi } function install_database_postgresql { From 0915e0c6bd9d9d370fbf05963704690580af62ec Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Thu, 2 Jan 2014 15:05:41 +0100 Subject: [PATCH 0330/4438] Add oslo.rootwrap to devstack gate oslo.rootwrap recently graduated but was not made part of the devstack-gate. 
This change is part of a series of changes affecting devstack-gate, config and devstack which will collectively fix this: https://review.openstack.org/#/q/status:open+topic:rootwrap-gate,n,z This should probably be merged once the config and devstack-gate changes are in, so that it can be self-testing. Change-Id: I7b1332c8004845a0dd76e27d871370d41d4524ac --- lib/oslo | 4 ++++ stackrc | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/lib/oslo b/lib/oslo index 816ae9a48a..f644ed76c3 100644 --- a/lib/oslo +++ b/lib/oslo @@ -22,6 +22,7 @@ set +o xtrace # -------- OSLOCFG_DIR=$DEST/oslo.config OSLOMSG_DIR=$DEST/oslo.messaging +OSLORWRAP_DIR=$DEST/oslo.rootwrap # Entry Points # ------------ @@ -37,6 +38,9 @@ function install_oslo() { git_clone $OSLOMSG_REPO $OSLOMSG_DIR $OSLOMSG_BRANCH setup_develop $OSLOMSG_DIR + + git_clone $OSLORWRAP_REPO $OSLORWRAP_DIR $OSLORWRAP_BRANCH + setup_develop $OSLORWRAP_DIR } # cleanup_oslo() - purge possibly old versions of oslo diff --git a/stackrc b/stackrc index 695bdb15d6..3fdc566ed2 100644 --- a/stackrc +++ b/stackrc @@ -136,6 +136,10 @@ OSLOCFG_BRANCH=${OSLOCFG_BRANCH:-master} OSLOMSG_REPO=${OSLOMSG_REPO:-${GIT_BASE}/openstack/oslo.messaging.git} OSLOMSG_BRANCH=${OSLOMSG_BRANCH:-master} +# oslo.rootwrap +OSLORWRAP_REPO=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git} +OSLORWRAP_BRANCH=${OSLORWRAP_BRANCH:-master} + # pbr drives the setuptools configs PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git} PBR_BRANCH=${PBR_BRANCH:-master} From 05952e3fcc6bdd9ccd1c7980e6a73c527711c08c Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Sun, 5 Jan 2014 07:59:06 -0800 Subject: [PATCH 0331/4438] Configuration changes required to support VMware NSX plugin (Formerly known as Nicira NVP plugin). Following Neutron change 79fbeb7ebebc0dfbe143aee96fbc250d1b9e7582, this patch introduces the new naming scheme for Neutron VMware NSX plugin configuration. 
Related-blueprint: nvp-third-part-support (aka bp vmware-nsx-third-party) Partial-implements blueprint: nicira-plugin-renaming Change-Id: If7790887661507bfdec6d2b97c0f99609039aa73 --- exercises/neutron-adv-test.sh | 4 +- lib/neutron_plugins/{nicira => vmware_nsx} | 77 ++++++++++--------- lib/neutron_thirdparty/{nicira => vmware_nsx} | 62 +++++++-------- 3 files changed, 72 insertions(+), 71 deletions(-) rename lib/neutron_plugins/{nicira => vmware_nsx} (59%) rename lib/neutron_thirdparty/{nicira => vmware_nsx} (50%) diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index 0a100c0fe8..0c0d42f458 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -400,10 +400,10 @@ main() { echo Description echo echo Copyright 2012, Cisco Systems - echo Copyright 2012, Nicira Networks, Inc. + echo Copyright 2012, VMware, Inc. echo Copyright 2012, NTT MCL, Inc. echo - echo Please direct any questions to dedutta@cisco.com, dan@nicira.com, nachi@nttmcl.com + echo Please direct any questions to dedutta@cisco.com, dwendlandt@vmware.com, nachi@nttmcl.com echo diff --git a/lib/neutron_plugins/nicira b/lib/neutron_plugins/vmware_nsx similarity index 59% rename from lib/neutron_plugins/nicira rename to lib/neutron_plugins/vmware_nsx index 87d3c3d17b..d506cb6f8d 100644 --- a/lib/neutron_plugins/nicira +++ b/lib/neutron_plugins/vmware_nsx @@ -1,5 +1,5 @@ -# Neutron Nicira NVP plugin -# --------------------------- +# Neutron VMware NSX plugin +# ------------------------- # Save trace setting MY_XTRACE=$(set +o | grep xtrace) @@ -9,10 +9,10 @@ source $TOP_DIR/lib/neutron_plugins/ovs_base function setup_integration_bridge() { _neutron_ovs_base_setup_bridge $OVS_BRIDGE - # Set manager to NVP controller (1st of list) - if [[ "$NVP_CONTROLLERS" != "" ]]; then + # Set manager to NSX controller (1st of list) + if [[ "$NSX_CONTROLLERS" != "" ]]; then # Get the first controller - controllers=(${NVP_CONTROLLERS//,/ }) + 
controllers=(${NSX_CONTROLLERS//,/ }) OVS_MGR_IP=${controllers[0]} else die $LINENO "Error - No controller specified. Unable to set a manager for OVS" @@ -21,7 +21,7 @@ function setup_integration_bridge() { } function is_neutron_ovs_base_plugin() { - # NVP uses OVS, but not the l3-agent + # NSX uses OVS, but not the l3-agent return 0 } @@ -33,14 +33,15 @@ function neutron_plugin_create_nova_conf() { } function neutron_plugin_install_agent_packages() { - # Nicira Plugin does not run q-agt, but it currently needs dhcp and metadata agents + # VMware NSX Plugin does not run q-agt, but it currently needs dhcp and metadata agents _neutron_ovs_base_install_agent_packages } function neutron_plugin_configure_common() { - Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nicira - Q_PLUGIN_CONF_FILENAME=nvp.ini - Q_DB_NAME="neutron_nvp" + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware + Q_PLUGIN_CONF_FILENAME=nsx.ini + Q_DB_NAME="neutron_nsx" + # TODO(armando-migliaccio): rename this once the code rename is complete Q_PLUGIN_CLASS="neutron.plugins.nicira.NeutronPlugin.NvpPluginV2" } @@ -57,76 +58,76 @@ function neutron_plugin_configure_dhcp_agent() { } function neutron_plugin_configure_l3_agent() { - # Nicira plugin does not run L3 agent - die $LINENO "q-l3 should must not be executed with Nicira plugin!" + # VMware NSX plugin does not run L3 agent + die $LINENO "q-l3 should must not be executed with VMware NSX plugin!" } function neutron_plugin_configure_plugin_agent() { - # Nicira plugin does not run L2 agent - die $LINENO "q-agt must not be executed with Nicira plugin!" + # VMware NSX plugin does not run L2 agent + die $LINENO "q-agt must not be executed with VMware NSX plugin!" 
} function neutron_plugin_configure_service() { if [[ "$MAX_LP_PER_BRIDGED_LS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nvp max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS + iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS fi if [[ "$MAX_LP_PER_OVERLAY_LS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nvp max_lp_per_overlay_ls $MAX_LP_PER_OVERLAY_LS + iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_overlay_ls $MAX_LP_PER_OVERLAY_LS fi if [[ "$FAILOVER_TIME" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nvp failover_time $FAILOVER_TIME + iniset /$Q_PLUGIN_CONF_FILE nsx failover_time $FAILOVER_TIME fi if [[ "$CONCURRENT_CONNECTIONS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nvp concurrent_connections $CONCURRENT_CONNECTIONS + iniset /$Q_PLUGIN_CONF_FILE nsx concurrent_connections $CONCURRENT_CONNECTIONS fi if [[ "$DEFAULT_TZ_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_tz_uuid $DEFAULT_TZ_UUID else - die $LINENO "The nicira plugin won't work without a default transport zone." + die $LINENO "The VMware NSX plugin won't work without a default transport zone." fi if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID Q_L3_ENABLED=True Q_L3_ROUTER_PER_TENANT=True - iniset /$Q_PLUGIN_CONF_FILE nvp metadata_mode access_network + iniset /$Q_PLUGIN_CONF_FILE nsx metadata_mode access_network fi if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID fi - # NVP_CONTROLLERS must be a comma separated string - if [[ "$NVP_CONTROLLERS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_controllers $NVP_CONTROLLERS + # NSX_CONTROLLERS must be a comma separated string + if [[ "$NSX_CONTROLLERS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_controllers $NSX_CONTROLLERS else - die $LINENO "The nicira plugin needs at least an NVP controller." 
+ die $LINENO "The VMware NSX plugin needs at least an NSX controller." fi - if [[ "$NVP_USER" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_user $NVP_USER + if [[ "$NSX_USER" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_user $NSX_USER fi - if [[ "$NVP_PASSWORD" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_password $NVP_PASSWORD + if [[ "$NSX_PASSWORD" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_password $NSX_PASSWORD fi - if [[ "$NVP_REQ_TIMEOUT" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT req_timeout $NVP_REQ_TIMEOUT + if [[ "$NSX_REQ_TIMEOUT" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT req_timeout $NSX_REQ_TIMEOUT fi - if [[ "$NVP_HTTP_TIMEOUT" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT http_timeout $NVP_HTTP_TIMEOUT + if [[ "$NSX_HTTP_TIMEOUT" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT http_timeout $NSX_HTTP_TIMEOUT fi - if [[ "$NVP_RETRIES" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT retries $NVP_RETRIES + if [[ "$NSX_RETRIES" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT retries $NSX_RETRIES fi - if [[ "$NVP_REDIRECTS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT redirects $NVP_REDIRECTS + if [[ "$NSX_REDIRECTS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT redirects $NSX_REDIRECTS fi if [[ "$AGENT_MODE" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nvp agent_mode $AGENT_MODE + iniset /$Q_PLUGIN_CONF_FILE nsx agent_mode $AGENT_MODE if [[ "$AGENT_MODE" == "agentless" ]]; then if [[ "$DEFAULT_SERVICE_CLUSTER_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_service_cluster_uuid $DEFAULT_SERVICE_CLUSTER_UUID else die $LINENO "Agentless mode requires a service cluster." 
fi - iniset /$Q_PLUGIN_CONF_FILE nvp_metadata metadata_server_address $Q_META_DATA_IP + iniset /$Q_PLUGIN_CONF_FILE nsx_metadata metadata_server_address $Q_META_DATA_IP fi fi } diff --git a/lib/neutron_thirdparty/nicira b/lib/neutron_thirdparty/vmware_nsx similarity index 50% rename from lib/neutron_thirdparty/nicira rename to lib/neutron_thirdparty/vmware_nsx index a24392cd4d..70d348274f 100644 --- a/lib/neutron_thirdparty/nicira +++ b/lib/neutron_thirdparty/vmware_nsx @@ -1,14 +1,14 @@ -# Nicira NVP +# VMware NSX # ---------- # This third-party addition can be used to configure connectivity between a DevStack instance -# and an NVP Gateway in dev/test environments. In order to use this correctly, the following +# and an NSX Gateway in dev/test environments. In order to use this correctly, the following # env variables need to be set (e.g. in your localrc file): # -# * enable_service nicira --> to execute this third-party addition +# * enable_service vmware_nsx --> to execute this third-party addition # * PUBLIC_BRIDGE --> bridge used for external connectivity, typically br-ex -# * NVP_GATEWAY_NETWORK_INTERFACE --> interface used to communicate with the NVP Gateway -# * NVP_GATEWAY_NETWORK_CIDR --> CIDR to configure br-ex, e.g. 172.24.4.211/24 +# * NSX_GATEWAY_NETWORK_INTERFACE --> interface used to communicate with the NSX Gateway +# * NSX_GATEWAY_NETWORK_CIDR --> CIDR to configure br-ex, e.g. 
172.24.4.211/24 # Save trace setting MY_XTRACE=$(set +o | grep xtrace) @@ -17,64 +17,64 @@ set +o xtrace # This is the interface that connects the Devstack instance # to an network that allows it to talk to the gateway for # testing purposes -NVP_GATEWAY_NETWORK_INTERFACE=${NVP_GATEWAY_NETWORK_INTERFACE:-eth2} -# Re-declare floating range as it's needed also in stop_nicira, which +NSX_GATEWAY_NETWORK_INTERFACE=${NSX_GATEWAY_NETWORK_INTERFACE:-eth2} +# Re-declare floating range as it's needed also in stop_vmware_nsx, which # is invoked by unstack.sh FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} -function configure_nicira() { +function configure_vmware_nsx() { : } -function init_nicira() { - if ! is_set NVP_GATEWAY_NETWORK_CIDR; then - NVP_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} +function init_vmware_nsx() { + if ! is_set NSX_GATEWAY_NETWORK_CIDR; then + NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address to set on br-ex was not specified. 
" - echo "Defaulting to "$NVP_GATEWAY_NETWORK_CIDR + echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR fi # Make sure the interface is up, but not configured - sudo ip link set $NVP_GATEWAY_NETWORK_INTERFACE up + sudo ip link set $NSX_GATEWAY_NETWORK_INTERFACE up # Save and then flush the IP addresses on the interface - addresses=$(ip addr show dev $NVP_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'}) - sudo ip addr flush $NVP_GATEWAY_NETWORK_INTERFACE - # Use the PUBLIC Bridge to route traffic to the NVP gateway + addresses=$(ip addr show dev $NSX_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'}) + sudo ip addr flush $NSX_GATEWAY_NETWORK_INTERFACE + # Use the PUBLIC Bridge to route traffic to the NSX gateway # NOTE(armando-migliaccio): if running in a nested environment this will work # only with mac learning enabled, portsecurity and security profiles disabled - # The public bridge might not exist for the NVP plugin if Q_USE_DEBUG_COMMAND is off + # The public bridge might not exist for the NSX plugin if Q_USE_DEBUG_COMMAND is off # Try to create it anyway sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE - sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_INTERFACE - nvp_gw_net_if_mac=$(ip link show $NVP_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}') - sudo ip link set address $nvp_gw_net_if_mac dev $PUBLIC_BRIDGE + sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_INTERFACE + nsx_gw_net_if_mac=$(ip link show $NSX_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}') + sudo ip link set address $nsx_gw_net_if_mac dev $PUBLIC_BRIDGE for address in $addresses; do sudo ip addr add dev $PUBLIC_BRIDGE $address done - sudo ip addr add dev $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_CIDR + sudo ip addr add dev $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_CIDR } -function install_nicira() { +function install_vmware_nsx() { : } -function start_nicira() { +function start_vmware_nsx() { : } -function 
stop_nicira() { - if ! is_set NVP_GATEWAY_NETWORK_CIDR; then - NVP_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} +function stop_vmware_nsx() { + if ! is_set NSX_GATEWAY_NETWORK_CIDR; then + NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address expected on br-ex was not specified. " - echo "Defaulting to "$NVP_GATEWAY_NETWORK_CIDR + echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR fi - sudo ip addr del $NVP_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE + sudo ip addr del $NSX_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE # Save and then flush remaining addresses on the interface addresses=$(ip addr show dev $PUBLIC_BRIDGE | grep inet | awk {'print $2'}) sudo ip addr flush $PUBLIC_BRIDGE # Try to detach physical interface from PUBLIC_BRIDGE - sudo ovs-vsctl del-port $NVP_GATEWAY_NETWORK_INTERFACE - # Restore addresses on NVP_GATEWAY_NETWORK_INTERFACE + sudo ovs-vsctl del-port $NSX_GATEWAY_NETWORK_INTERFACE + # Restore addresses on NSX_GATEWAY_NETWORK_INTERFACE for address in $addresses; do - sudo ip addr add dev $NVP_GATEWAY_NETWORK_INTERFACE $address + sudo ip addr add dev $NSX_GATEWAY_NETWORK_INTERFACE $address done } From 21fe4e76d5453a252e802c5d5f487f88b896decf Mon Sep 17 00:00:00 2001 From: Vincent Hou Date: Thu, 21 Nov 2013 03:10:27 -0500 Subject: [PATCH 0332/4438] Add a flexible API version choice for Cinder, Glance and Heat The version of the authentication url is set to v1.0 for some projects by default. We can make it configurable via the parameter "$IDENTITY_API_VERSION". 
Closes-Bug: #1253539 Change-Id: I6640e345d1317b1308403c95b13f8a998320241b --- lib/cinder | 2 +- lib/glance | 4 ++-- lib/heat | 2 +- lib/keystone | 8 ++++++++ 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/lib/cinder b/lib/cinder index cbe732e9b0..099cfda7fd 100644 --- a/lib/cinder +++ b/lib/cinder @@ -341,7 +341,7 @@ function configure_cinder() { -e 's/snapshot_autoextend_percent =.*/snapshot_autoextend_percent = 20/' \ /etc/lvm/lvm.conf fi - iniset $CINDER_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT + configure_API_version $CINDER_CONF $IDENTITY_API_VERSION iniset $CINDER_CONF keystone_authtoken admin_user cinder iniset $CINDER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $CINDER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD diff --git a/lib/glance b/lib/glance index 135136db7e..321174e619 100644 --- a/lib/glance +++ b/lib/glance @@ -83,7 +83,7 @@ function configure_glance() { iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL iniset $GLANCE_REGISTRY_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA - iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + configure_API_version $GLANCE_REGISTRY_CONF $IDENTITY_API_VERSION iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_password $SERVICE_PASSWORD @@ -101,7 +101,7 @@ function configure_glance() { iniset $GLANCE_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $GLANCE_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL iniset $GLANCE_API_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA - iniset $GLANCE_API_CONF keystone_authtoken 
auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + configure_API_version $GLANCE_API_CONF $IDENTITY_API_VERSION iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $GLANCE_API_CONF keystone_authtoken admin_user glance iniset $GLANCE_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD diff --git a/lib/heat b/lib/heat index e44a618162..59fd3d7a7a 100644 --- a/lib/heat +++ b/lib/heat @@ -95,7 +95,7 @@ function configure_heat() { iniset $HEAT_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST iniset $HEAT_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $HEAT_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $HEAT_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + configure_API_version $HEAT_CONF $IDENTITY_API_VERSION iniset $HEAT_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $HEAT_CONF keystone_authtoken admin_user heat diff --git a/lib/keystone b/lib/keystone index 29b9604efe..79f1fd9e84 100644 --- a/lib/keystone +++ b/lib/keystone @@ -335,6 +335,14 @@ create_keystone_accounts() { fi } +# Configure the API version for the OpenStack projects. +# configure_API_version conf_file version +function configure_API_version() { + local conf_file=$1 + local api_version=$2 + iniset $conf_file keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$api_version +} + # init_keystone() - Initialize databases, etc. function init_keystone() { if is_service_enabled ldap; then From 74103f2b3ffd047a4582ae9d37a057534cb6cce7 Mon Sep 17 00:00:00 2001 From: Nikhil Manchanda Date: Fri, 3 Jan 2014 13:53:14 -0800 Subject: [PATCH 0333/4438] Handle trove service availabilty in tempest. 
Partially implements blueprint: trove-tempest Change-Id: I5413a7afeffe670f6972b41d61dd27ed05da5ba2 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 95b300ce77..08c0553f03 100644 --- a/lib/tempest +++ b/lib/tempest @@ -329,7 +329,7 @@ function configure_tempest() { iniset $TEMPEST_CONF network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}" # service_available - for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna; do + for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna trove; do if is_service_enabled $service ; then iniset $TEMPEST_CONF service_available $service "True" else From def4c141f1d917705ac1dbdbfe7525f47382dada Mon Sep 17 00:00:00 2001 From: Kaitlin Farr Date: Mon, 6 Jan 2014 08:52:49 -0500 Subject: [PATCH 0334/4438] Adds default value for fixed_key Adds a default value for fixed_key, for use by a key manager implementation that reads the key from the configuration settings. This single, fixed key proffers no protection if the key is compromised. The current implementation of the key manager does not work correctly if the key is not set, so including this option is helpful for Tempest testing and volume encryption within DevStack. 
Implements: blueprint encrypt-cinder-volumes Change-Id: Id83060afc862c793b79b5429355b213cb4c173fd https://blueprints.launchpad.net/nova/+spec/encrypt-cinder-volumes --- stack.sh | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/stack.sh b/stack.sh index 2438f9fffc..558f71a3a4 100755 --- a/stack.sh +++ b/stack.sh @@ -1098,6 +1098,15 @@ if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nov iniset $NOVA_CONF DEFAULT s3_affix_tenant "True" fi +# Create a randomized default value for the keymgr's fixed_key +if is_service_enabled nova; then + FIXED_KEY="" + for i in $(seq 1 64); + do FIXED_KEY+=$(echo "obase=16; $(($RANDOM % 16))" | bc); + done; + iniset $NOVA_CONF keymgr fixed_key "$FIXED_KEY" +fi + if is_service_enabled zeromq; then echo_summary "Starting zermomq receiver" screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver" From 4ad37630a2f938b19697f6e310def046a4dcca48 Mon Sep 17 00:00:00 2001 From: Juan Manuel Olle Date: Mon, 6 Jan 2014 15:07:09 -0300 Subject: [PATCH 0335/4438] Remove duplicated name services Due to the fact that keystone will not allow services with duplicated names, cinder and nova service names were changed Closes-Bug: #1259425 Change-Id: I988aef477b418a289426e02e5e108aa57dd1076b --- lib/cinder | 2 +- lib/nova | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index cbe732e9b0..a0b33c8c88 100644 --- a/lib/cinder +++ b/lib/cinder @@ -385,7 +385,7 @@ create_cinder_accounts() { --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" CINDER_V2_SERVICE=$(keystone service-create \ - --name=cinder \ + --name=cinderv2 \ --type=volumev2 \ --description="Cinder Volume Service V2" \ | grep " id " | get_field 2) diff --git a/lib/nova b/lib/nova index e754341bad..e9f87fce1f 100644 --- a/lib/nova +++ b/lib/nova @@ 
-338,7 +338,7 @@ create_nova_accounts() { --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" NOVA_V3_SERVICE=$(keystone service-create \ - --name=nova \ + --name=novav3 \ --type=computev3 \ --description="Nova Compute Service V3" \ | grep " id " | get_field 2) From 085abd8eb7c744170cd92429b9aea9d07fd4458b Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 11 Dec 2013 12:21:12 +0000 Subject: [PATCH 0336/4438] Fix xenapi functions' tests The tests got outdated, this fix makes the tests pass again. Change-Id: Iadddfbf34bf79ba455811645e766c2f3d0fcca84 --- tools/xen/mocks | 2 +- tools/xen/test_functions.sh | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/tools/xen/mocks b/tools/xen/mocks index 94b0ca4d02..ec8679e816 100644 --- a/tools/xen/mocks +++ b/tools/xen/mocks @@ -73,7 +73,7 @@ function [ { done return 1 fi - echo "Mock test does not implement the requested function" + echo "Mock test does not implement the requested function: ${1:-}" exit 1 } diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh index 0ae2cb7f9a..14551868e1 100755 --- a/tools/xen/test_functions.sh +++ b/tools/xen/test_functions.sh @@ -118,7 +118,7 @@ function test_zip_snapshot_location { function test_create_directory_for_kernels { ( . mocks - mock_out get_local_sr uuid1 + mock_out get_local_sr_path /var/run/sr-mount/uuid1 create_directory_for_kernels ) @@ -141,7 +141,7 @@ EOF function test_create_directory_for_images { ( . 
mocks - mock_out get_local_sr uuid1 + mock_out get_local_sr_path /var/run/sr-mount/uuid1 create_directory_for_images ) @@ -199,8 +199,7 @@ function test_get_local_sr { [ "$RESULT" == "uuid123" ] - assert_xe_min - assert_xe_param "sr-list" "name-label=Local storage" + assert_xe_param "pool-list" params=default-SR minimal=true } function test_get_local_sr_path { From 2781f3bfc3e0ceca29457f65adfddb63f01d8059 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 11 Dec 2013 13:41:54 +0000 Subject: [PATCH 0337/4438] Workaround missing zip snapshot At the moment, xenserver installation depends on github snapshots. Unfortunately, git.openstack.org does not have that capability. This fix includes: - Exit with error code, if a download fails - create proper urls, even if they are using the git protocol - set git base to github - so we are able to do snapshots Fixes bug: 1259905 Change-Id: I8d0cf8bf8abb16ee0a4b138a6719409c75e7a146 --- tools/xen/README.md | 3 +++ tools/xen/functions | 15 +++++++++++++-- tools/xen/mocks | 6 +++++- tools/xen/test_functions.sh | 21 +++++++++++++++++---- 4 files changed, 38 insertions(+), 7 deletions(-) diff --git a/tools/xen/README.md b/tools/xen/README.md index 06192ed2b7..ee1abcc091 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -70,6 +70,9 @@ the `XENAPI_PASSWORD` must be your dom0 root password. Of course, use real passwords if this machine is exposed. 
cat > ./localrc <&2 + exit 1 +} + function xapi_plugin_location { for PLUGIN_DIR in "/etc/xapi.d/plugins/" "/usr/lib/xcp/plugins/" "/usr/lib/xapi/plugins"; do if [ -d $PLUGIN_DIR ]; then @@ -11,7 +20,7 @@ function xapi_plugin_location { } function zip_snapshot_location { - echo $1 | sed "s:\.git$::;s:$:/zipball/$2:g" + echo $1 | sed "s,^git://,http://,g;s:\.git$::;s:$:/zipball/$2:g" } function create_directory_for_kernels { @@ -41,7 +50,9 @@ function extract_remote_zipball { local EXTRACTED_FILES=$(mktemp -d) { - wget -nv $ZIPBALL_URL -O $LOCAL_ZIPBALL --no-check-certificate + if ! wget -nv $ZIPBALL_URL -O $LOCAL_ZIPBALL --no-check-certificate; then + die_with_error "Failed to download [$ZIPBALL_URL]" + fi unzip -q -o $LOCAL_ZIPBALL -d $EXTRACTED_FILES rm -f $LOCAL_ZIPBALL } >&2 diff --git a/tools/xen/mocks b/tools/xen/mocks index ec8679e816..3b9b05c747 100644 --- a/tools/xen/mocks +++ b/tools/xen/mocks @@ -35,7 +35,7 @@ function mktemp { function wget { if [[ $@ =~ "failurl" ]]; then - exit 1 + return 1 fi echo "wget $@" >> $LIST_OF_ACTIONS } @@ -77,6 +77,10 @@ function [ { exit 1 } +function die_with_error { + echo "$1" >> $DEAD_MESSAGES +} + function xe { cat $XE_RESPONSE { diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh index 14551868e1..373d996760 100755 --- a/tools/xen/test_functions.sh +++ b/tools/xen/test_functions.sh @@ -29,6 +29,9 @@ function before_each_test { XE_CALLS=$(mktemp) truncate -s 0 $XE_CALLS + + DEAD_MESSAGES=$(mktemp) + truncate -s 0 $DEAD_MESSAGES } # Teardown @@ -64,6 +67,10 @@ function assert_xe_param { grep -qe "^$1\$" $XE_CALLS } +function assert_died_with { + diff -u <(echo "$1") $DEAD_MESSAGES +} + function mock_out { local FNNAME="$1" local OUTPUT="$2" @@ -109,10 +116,16 @@ function test_no_plugin_directory_found { grep "[ -d /usr/lib/xcp/plugins/ ]" $LIST_OF_ACTIONS } -function test_zip_snapshot_location { +function test_zip_snapshot_location_http { diff \ - <(zip_snapshot_location 
"git://git.openstack.org/openstack/nova.git" "master") \ - <(echo "git://git.openstack.org/openstack/nova/zipball/master") + <(zip_snapshot_location "http://github.com/openstack/nova.git" "master") \ + <(echo "http://github.com/openstack/nova/zipball/master") +} + +function test_zip_snapsot_location_git { + diff \ + <(zip_snapshot_location "git://github.com/openstack/nova.git" "master") \ + <(echo "http://github.com/openstack/nova/zipball/master") } function test_create_directory_for_kernels { @@ -179,7 +192,7 @@ function test_extract_remote_zipball_wget_fail { local IGNORE IGNORE=$(. mocks && extract_remote_zipball "failurl") - assert_previous_command_failed + assert_died_with "Failed to download [failurl]" } function test_find_nova_plugins { From f93b98ac7309e3ebd106b44843650a161fad4616 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Wed, 8 Jan 2014 18:15:14 +0900 Subject: [PATCH 0338/4438] gitignore: add .localrc.auto and local.conf The changeset of 893e66360caf3bcf0578d4541b3c17d089c33b02, Change-Id of I367cadc86116621e9574ac203aafdab483d810d3 introduced local.conf and generates .localrc.auto. But they aren't in .gitignore. This patch adds them into .gitignore. Change-Id: I7d4dc99d980d9c5b5156cf915646bc96163a3dc4 Closes-Bug: #1267027 --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index c49b4a3287..43652024f3 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,5 @@ accrc devstack-docs-* docs/ docs-files +.localrc.auto +local.conf From 96f8e34c38f172689f09842761dd20600a60fc5a Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Mon, 9 Sep 2013 14:22:07 -0700 Subject: [PATCH 0339/4438] Enable multi-threaded nova-conductor Just like I09f4c6f57e71982b8c7fc92645b3ebec12ff1348, enable multi-threaded nova-conductor. This feature was merged into nova in I8698997d211d7617ee14a1c6113056a694d70620. 
Change-Id: Id7042284e81bd64092a400d24a3170ce07beb08c --- lib/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova b/lib/nova index e754341bad..39685a835a 100644 --- a/lib/nova +++ b/lib/nova @@ -377,6 +377,7 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT osapi_compute_workers "4" iniset $NOVA_CONF DEFAULT ec2_workers "4" iniset $NOVA_CONF DEFAULT metadata_workers "4" + iniset $NOVA_CONF conductor workers "4" iniset $NOVA_CONF DEFAULT sql_connection `database_connection_url nova` iniset $NOVA_CONF DEFAULT fatal_deprecations "True" iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" From 25049cd23de0e8055326c668ff119dd8cdf0bae4 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Thu, 9 Jan 2014 13:53:52 +0100 Subject: [PATCH 0340/4438] Use --tenant-id, not --tenant_id Change-Id: I0e3d65d5b69ac82cbf7ee6ffc41ead369af8c126 --- lib/cinder | 2 +- lib/ironic | 4 ++-- lib/marconi | 2 +- lib/neutron | 14 +++++++------- lib/nova | 2 +- lib/savanna | 2 +- lib/swift | 2 +- lib/trove | 2 +- 8 files changed, 15 insertions(+), 15 deletions(-) diff --git a/lib/cinder b/lib/cinder index cbe732e9b0..fe278f60bb 100644 --- a/lib/cinder +++ b/lib/cinder @@ -365,7 +365,7 @@ create_cinder_accounts() { CINDER_USER=$(keystone user-create \ --name=cinder \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=cinder@example.com \ | grep " id " | get_field 2) keystone user-role-add \ diff --git a/lib/ironic b/lib/ironic index 099746ae22..1ff3c81f06 100644 --- a/lib/ironic +++ b/lib/ironic @@ -149,11 +149,11 @@ create_ironic_accounts() { IRONIC_USER=$(keystone user-create \ --name=ironic \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=ironic@example.com \ | grep " id " | get_field 2) keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --user_id $IRONIC_USER \ --role_id $ADMIN_ROLE if [[ 
"$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then diff --git a/lib/marconi b/lib/marconi index 742f866e7d..6b9ffdc0b3 100644 --- a/lib/marconi +++ b/lib/marconi @@ -142,7 +142,7 @@ function create_marconi_accounts() { MARCONI_USER=$(get_id keystone user-create --name=marconi \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=marconi@example.com) keystone user-role-add --tenant-id $SERVICE_TENANT \ --user-id $MARCONI_USER \ diff --git a/lib/neutron b/lib/neutron index a7519ad328..43f43f951a 100644 --- a/lib/neutron +++ b/lib/neutron @@ -328,7 +328,7 @@ function create_neutron_accounts() { NEUTRON_USER=$(keystone user-create \ --name=neutron \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=neutron@example.com \ | grep " id " | get_field 2) keystone user-role-add \ @@ -357,7 +357,7 @@ function create_neutron_initial_network() { # Create a small network # Since neutron command is executed in admin context at this point, - # ``--tenant_id`` needs to be specified. + # ``--tenant-id`` needs to be specified. if is_baremetal; then if [[ "$PUBLIC_INTERFACE" == '' || "$OVS_PHYSICAL_BRIDGE" == '' ]]; then die $LINENO "Neutron settings for baremetal not set.. 
exiting" @@ -367,16 +367,16 @@ function create_neutron_initial_network() { sudo ip addr del $IP dev $PUBLIC_INTERFACE sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE done - NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2) + NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant-id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID" - SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + SUBNET_ID=$(neutron subnet-create --tenant-id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $TENANT_ID" sudo ifconfig $OVS_PHYSICAL_BRIDGE up sudo route add default gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE else - NET_ID=$(neutron net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + NET_ID=$(neutron net-create --tenant-id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID" - SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + SUBNET_ID=$(neutron subnet-create --tenant-id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for 
$TENANT_ID" fi @@ -384,7 +384,7 @@ function create_neutron_initial_network() { # Create a router, and add the private subnet as one of its interfaces if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then # create a tenant-owned router. - ROUTER_ID=$(neutron router-create --tenant_id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2) + ROUTER_ID=$(neutron router-create --tenant-id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2) die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $TENANT_ID $Q_ROUTER_NAME" else # Plugin only supports creating a single router, which should be admin owned. diff --git a/lib/nova b/lib/nova index e754341bad..367ec83072 100644 --- a/lib/nova +++ b/lib/nova @@ -318,7 +318,7 @@ create_nova_accounts() { NOVA_USER=$(keystone user-create \ --name=nova \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=nova@example.com \ | grep " id " | get_field 2) keystone user-role-add \ diff --git a/lib/savanna b/lib/savanna index 6794e36dfd..bb4dfe693d 100644 --- a/lib/savanna +++ b/lib/savanna @@ -56,7 +56,7 @@ function create_savanna_accounts() { SAVANNA_USER=$(keystone user-create \ --name=savanna \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=savanna@example.com \ | grep " id " | get_field 2) keystone user-role-add \ diff --git a/lib/swift b/lib/swift index 96929db557..44c230be93 100644 --- a/lib/swift +++ b/lib/swift @@ -514,7 +514,7 @@ function create_swift_accounts() { ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") SWIFT_USER=$(keystone user-create --name=swift --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT --email=swift@example.com | grep " id " | get_field 2) + --tenant-id $SERVICE_TENANT --email=swift@example.com | grep " id " | get_field 2) keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $SWIFT_USER --role-id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then diff 
--git a/lib/trove b/lib/trove index f8e3eddfe2..4efdb5d669 100644 --- a/lib/trove +++ b/lib/trove @@ -64,7 +64,7 @@ create_trove_accounts() { TROVE_USER=$(keystone user-create \ --name=trove \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=trove@example.com \ | grep " id " | get_field 2) keystone user-role-add --tenant-id $SERVICE_TENANT \ From 72dc98ed6bcdaa1cdd81c1b655b5cbdf5490291d Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Thu, 9 Jan 2014 21:57:22 +0900 Subject: [PATCH 0341/4438] Correct Qpid package name in files/apts/neutron Ubuntu qpid server package is named as "qpidd", but files/apts/neutron has an entry "qpid". Change-Id: Ie3f8391a7404bdeb222acfcce77ca80a14ea8693 Closes-Bug: #1267459 --- files/apts/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/apts/neutron b/files/apts/neutron index 0f4b69f8ef..4e9f0f7dfd 100644 --- a/files/apts/neutron +++ b/files/apts/neutron @@ -20,6 +20,6 @@ python-qpid # dist:precise dnsmasq-base dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise,quantal rabbitmq-server # NOPRIME -qpid # NOPRIME +qpidd # NOPRIME sqlite3 vlan From fa5ccfff1098bb85eb7810ad5146fbdfee83fb15 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Thu, 9 Jan 2014 13:27:35 +0100 Subject: [PATCH 0342/4438] Setup Keystone catalog information for Ceilometer Change-Id: I3f536f38fe7862ee41b06d1d48b848cc07492c8d Closes-Bug: #1267322 --- files/default_catalog.templates | 5 ++++ lib/ceilometer | 42 +++++++++++++++++++++++++++++++++ stack.sh | 4 ++++ 3 files changed, 51 insertions(+) diff --git a/files/default_catalog.templates b/files/default_catalog.templates index 277904a8e3..430c42a337 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -51,3 +51,8 @@ catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8000/v1 catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8000/v1 
catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8000/v1 catalog.RegionOne.orchestration.name = Heat Service + +catalog.RegionOne.metering.publicURL = http://%SERVICE_HOST%:8777/v1 +catalog.RegionOne.metering.adminURL = http://%SERVICE_HOST%:8777/v1 +catalog.RegionOne.metering.internalURL = http://%SERVICE_HOST%:8777/v1 +catalog.RegionOne.metering.name = Telemetry Service diff --git a/lib/ceilometer b/lib/ceilometer index fac3be14a9..fe72fcdb11 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -48,8 +48,50 @@ CEILOMETER_BIN_DIR=$(get_python_exec_prefix) # Set up database backend CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql} +# Ceilometer connection info. +CEILOMETER_SERVICE_PROTOCOL=http +CEILOMETER_SERVICE_HOST=$SERVICE_HOST +CEILOMETER_SERVICE_PORT=${CEILOMETER_SERVICE_PORT:-8777} +# + # Functions # --------- +# +# create_ceilometer_accounts() - Set up common required ceilometer accounts + +create_ceilometer_accounts() { + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + # Ceilometer + if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then + CEILOMETER_USER=$(keystone user-create \ + --name=ceilometer \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=ceilometer@example.com \ + | grep " id " | get_field 2) + keystone user-role-add \ + --tenant-id $SERVICE_TENANT \ + --user-id $CEILOMETER_USER \ + --role-id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + CEILOMETER_SERVICE=$(keystone service-create \ + --name=ceilometer \ + --type=metering \ + --description="OpenStack Telemetry Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $CEILOMETER_SERVICE \ + --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ + --adminurl 
"$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ + --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" + fi + fi +} + # cleanup_ceilometer() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up diff --git a/stack.sh b/stack.sh index 2438f9fffc..bf782bc047 100755 --- a/stack.sh +++ b/stack.sh @@ -901,6 +901,10 @@ if is_service_enabled key; then create_trove_accounts fi + if is_service_enabled ceilometer; then + create_ceilometer_accounts + fi + if is_service_enabled swift || is_service_enabled s-proxy; then create_swift_accounts fi From 6681a4fae9df92cee77900f2248b8e98c501626f Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 10 Jan 2014 15:28:29 +0900 Subject: [PATCH 0343/4438] bash8: fix bash8 warning This patch removes the following bash8 warnings. > /devstack/ $ ./run_tests.sh > Running bash8... > E003: Indent not multiple of 4: ' wget -c $image_url -O $FILES/$IMAGE_FNAME' > - functions: L1367 > E003: Indent not multiple of 4: ' if [[ $? 
-ne 0 ]]; then' > - functions: L1368 > E003: Indent not multiple of 4: ' echo "Not found: $image_url"' > - functions: L1369 > E003: Indent not multiple of 4: ' return' > - functions: L1370 > E003: Indent not multiple of 4: ' fi' > - functions: L1371 > E003: Indent not multiple of 4: ' `"should use a descriptor-data pair."' > - functions: L1423 > E003: Indent not multiple of 4: ' `" Attempt to retrieve the *-flat.vmdk: $flat_url"' > - functions: L1438 > E003: Indent not multiple of 4: ' `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"' > - functions: L1477 > E003: Indent not multiple of 4: ' warn $LINENO "Descriptor not found $descriptor_url"' > - functions: L1492 > E003: Indent not multiple of 4: ' descriptor_found=false' > - functions: L1493 > E003: Indent not multiple of 4: ' fi' > - functions: L1501 > E003: Indent not multiple of 4: ' fi' > - functions: L1502 > E003: Indent not multiple of 4: ' #TODO(alegendre): handle streamOptimized once supported by the VMware driver.' > - functions: L1503 > E003: Indent not multiple of 4: ' vmdk_disktype="preallocated"' > - functions: L1504 > 14 bash8 error(s) found Change-Id: Icf2cddf283192a50253ccfa697c2d32eec75b4ba Closes-Bug: #1267716 --- functions | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/functions b/functions index e79e1d58af..6f09685efb 100644 --- a/functions +++ b/functions @@ -1364,11 +1364,11 @@ function upload_image() { if [[ $image_url != file* ]]; then # Downloads the image (uec ami+aki style), then extracts it. if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then - wget -c $image_url -O $FILES/$IMAGE_FNAME - if [[ $? -ne 0 ]]; then - echo "Not found: $image_url" - return - fi + wget -c $image_url -O $FILES/$IMAGE_FNAME + if [[ $? 
-ne 0 ]]; then + echo "Not found: $image_url" + return + fi fi IMAGE="$FILES/${IMAGE_FNAME}" else @@ -1420,7 +1420,7 @@ function upload_image() { vmdk_create_type="${vmdk_create_type%?}" descriptor_data_pair_msg="Monolithic flat and VMFS disks "` - `"should use a descriptor-data pair." + `"should use a descriptor-data pair." if [[ "$vmdk_create_type" = "monolithicSparse" ]]; then vmdk_disktype="sparse" elif [[ "$vmdk_create_type" = "monolithicFlat" || \ @@ -1435,7 +1435,7 @@ function upload_image() { path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` flat_url="${image_url:0:$path_len}$flat_fname" warn $LINENO "$descriptor_data_pair_msg"` - `" Attempt to retrieve the *-flat.vmdk: $flat_url" + `" Attempt to retrieve the *-flat.vmdk: $flat_url" if [[ $flat_url != file* ]]; then if [[ ! -f $FILES/$flat_fname || \ "$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then @@ -1474,7 +1474,7 @@ function upload_image() { flat_path="${image_url:0:$path_len}" descriptor_url=$flat_path$descriptor_fname warn $LINENO "$descriptor_data_pair_msg"` - `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" + `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" if [[ $flat_path != file* ]]; then if [[ ! -f $FILES/$descriptor_fname || \ "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then @@ -1489,8 +1489,8 @@ function upload_image() { descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g") if [[ ! -f $descriptor_url || \ "$(stat -c "%s" $descriptor_url)" == "0" ]]; then - warn $LINENO "Descriptor not found $descriptor_url" - descriptor_found=false + warn $LINENO "Descriptor not found $descriptor_url" + descriptor_found=false fi fi if $descriptor_found; then @@ -1498,10 +1498,10 @@ function upload_image() { `"grep -a -F -m 1 'ddb.adapterType =' $descriptor_url)" vmdk_adapter_type="${vmdk_adapter_type#*\"}" vmdk_adapter_type="${vmdk_adapter_type%?}" - fi - fi - #TODO(alegendre): handle streamOptimized once supported by the VMware driver. 
- vmdk_disktype="preallocated" + fi + fi + #TODO(alegendre): handle streamOptimized once supported by the VMware driver. + vmdk_disktype="preallocated" else #TODO(alegendre): handle streamOptimized once supported by the VMware driver. vmdk_disktype="preallocated" From d7f6090f29786f091773497bc3597142d94619ec Mon Sep 17 00:00:00 2001 From: Alvaro Lopez Ortega Date: Sun, 22 Dec 2013 17:03:47 +0100 Subject: [PATCH 0344/4438] Add support for Fedora 20 The list of RPM packages have been updated to support the recently released Fedora 20 distribution. Closes-Bug: #1263291 Co-Authored: Alvaro Lopez Ortega Change-Id: Ia66abef1a1a54e6d5ee6eebc12908cef3f1d211d --- files/rpms/cinder | 1 + files/rpms/general | 1 + files/rpms/glance | 5 +++-- files/rpms/horizon | 4 ++-- files/rpms/keystone | 10 +++++----- files/rpms/neutron | 4 ++-- files/rpms/nova | 6 +++--- files/rpms/swift | 2 +- files/rpms/tempest | 2 +- files/rpms/trove | 2 +- stack.sh | 4 ++-- 11 files changed, 22 insertions(+), 19 deletions(-) diff --git a/files/rpms/cinder b/files/rpms/cinder index c4edb68f14..623c13e676 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -4,3 +4,4 @@ qemu-img python-devel postgresql-devel iscsi-initiator-utils +python-lxml #dist:f18,f19,f20 diff --git a/files/rpms/general b/files/rpms/general index 2db31d1db0..40246ea4ab 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -20,6 +20,7 @@ tar tcpdump unzip wget +which # [1] : some of installed tools have unversioned dependencies on this, # but others have versioned (<=0.7). 
So if a later version (0.7.1) diff --git a/files/rpms/glance b/files/rpms/glance index dd66171f7a..fffd9c85b4 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -1,6 +1,6 @@ gcc libffi-devel # testonly -libxml2-devel +libxml2-devel # testonly libxslt-devel # testonly mysql-devel # testonly openssl-devel # testonly @@ -9,7 +9,8 @@ python-argparse python-devel python-eventlet python-greenlet -python-paste-deploy #dist:f16,f17,f18,f19 +python-lxml #dist:f18,f19,f20 +python-paste-deploy #dist:f18,f19,f20 python-routes python-sqlalchemy python-wsgiref diff --git a/files/rpms/horizon b/files/rpms/horizon index aa27ab4e97..59503cc9aa 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -16,8 +16,8 @@ python-kombu python-migrate python-mox python-nose -python-paste #dist:f16,f17,f18,f19 -python-paste-deploy #dist:f16,f17,f18,f19 +python-paste #dist:f18,f19,f20 +python-paste-deploy #dist:f18,f19,f20 python-routes python-sphinx python-sqlalchemy diff --git a/files/rpms/keystone b/files/rpms/keystone index 52dbf477d8..99e8524628 100644 --- a/files/rpms/keystone +++ b/files/rpms/keystone @@ -1,11 +1,11 @@ python-greenlet -python-lxml #dist:f16,f17,f18,f19 -python-paste #dist:f16,f17,f18,f19 -python-paste-deploy #dist:f16,f17,f18,f19 -python-paste-script #dist:f16,f17,f18,f19 +libxslt-devel # dist:f20 +python-lxml #dist:f18,f19,f20 +python-paste #dist:f18,f19,f20 +python-paste-deploy #dist:f18,f19,f20 +python-paste-script #dist:f18,f19,f20 python-routes python-sqlalchemy -python-sqlite2 python-webob sqlite diff --git a/files/rpms/neutron b/files/rpms/neutron index a7700f77d4..67bf52350a 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -11,8 +11,8 @@ python-greenlet python-iso8601 python-kombu #rhel6 gets via pip -python-paste # dist:f16,f17,f18,f19 -python-paste-deploy # dist:f16,f17,f18,f19 +python-paste # dist:f18,f19,f20 +python-paste-deploy # dist:f18,f19,f20 python-qpid python-routes python-sqlalchemy diff --git a/files/rpms/nova b/files/rpms/nova 
index c99f3defc8..ac70ac5d6f 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -28,11 +28,11 @@ python-kombu python-lockfile python-migrate python-mox -python-paramiko # dist:f16,f17,f18,f19 +python-paramiko # dist:f18,f19,f20 # ^ on RHEL, brings in python-crypto which conflicts with version from # pip we need -python-paste # dist:f16,f17,f18,f19 -python-paste-deploy # dist:f16,f17,f18,f19 +python-paste # dist:f18,f19,f20 +python-paste-deploy # dist:f18,f19,f20 python-qpid python-routes python-sqlalchemy diff --git a/files/rpms/swift b/files/rpms/swift index b137f30dce..32432bca9b 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -9,7 +9,7 @@ python-eventlet python-greenlet python-netifaces python-nose -python-paste-deploy # dist:f16,f17,f18,f19 +python-paste-deploy # dist:f18,f19,f20 python-simplejson python-webob pyxattr diff --git a/files/rpms/tempest b/files/rpms/tempest index de32b81504..e7bbd43cd6 100644 --- a/files/rpms/tempest +++ b/files/rpms/tempest @@ -1 +1 @@ -libxslt-dev \ No newline at end of file +libxslt-devel diff --git a/files/rpms/trove b/files/rpms/trove index 09dcee8104..c5cbdea012 100644 --- a/files/rpms/trove +++ b/files/rpms/trove @@ -1 +1 @@ -libxslt1-dev # testonly +libxslt-devel # testonly diff --git a/stack.sh b/stack.sh index ce5fbd47e5..4e12c45523 100755 --- a/stack.sh +++ b/stack.sh @@ -12,7 +12,7 @@ # developer install. # To keep this script simple we assume you are running on a recent **Ubuntu** -# (12.04 Precise or newer) or **Fedora** (F16 or newer) machine. (It may work +# (12.04 Precise or newer) or **Fedora** (F18 or newer) machine. (It may work # on other platforms but support for those platforms is left to those who added # them to DevStack.) It should work in a VM or physical server. 
Additionally # we maintain a list of ``apt`` and ``rpm`` dependencies and other configuration @@ -131,7 +131,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f16|f17|f18|f19|opensuse-12.2|rhel6) ]]; then +if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f18|f19|f20|opensuse-12.2|rhel6) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 1b0eccdf75cf70a26c1b2ae6b9beaa75ebaf7a6a Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Fri, 10 Jan 2014 11:51:01 +0100 Subject: [PATCH 0345/4438] Fix Heat/Cloud formation catalog template Cloud formation and Heat API ports were mixed. Change-Id: I029592c4821bb93c8a1dd91519f30908efd56627 Closes-Bug: #1267355 --- files/default_catalog.templates | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/files/default_catalog.templates b/files/default_catalog.templates index 277904a8e3..debcedfb5b 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -47,7 +47,12 @@ catalog.RegionOne.image.adminURL = http://%SERVICE_HOST%:9292 catalog.RegionOne.image.internalURL = http://%SERVICE_HOST%:9292 catalog.RegionOne.image.name = Image Service -catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8000/v1 -catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8000/v1 -catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8000/v1
+catalog.RegionOne.cloudformation.name = Heat CloudFormation Service + +catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s +catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s +catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s catalog.RegionOne.orchestration.name = Heat Service From f69c6f16d21ce51eb5939ea6fecd99a8b28b426b Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 9 Jan 2014 19:47:54 -0500 Subject: [PATCH 0346/4438] Enable server-side and client-side logs for libvirt Need this to diagnose libvirt Errors in the gate Change-Id: Id46137a71d17abc8bfab66b14ab567d81a31f018 Related-Bug: #1254872 --- lib/nova | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/nova b/lib/nova index e754341bad..162212da59 100644 --- a/lib/nova +++ b/lib/nova @@ -648,6 +648,14 @@ function start_nova_compute() { fi if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + # Enable client side traces for libvirt + export LIBVIRT_LOG_FILTERS="1:libvirt" + export LIBVIRT_LOG_OUTPUTS="1:file:/var/log/libvirt/libvirtd-nova.log" + + # Enable server side traces for libvirtd + echo "log_filters=\"1:libvirt 1:qemu\"" | sudo tee -a /etc/libvirt/libvirtd.conf + echo "log_outputs=\"1:file:/var/log/libvirt/libvirtd.log\"" | sudo tee -a /etc/libvirt/libvirtd.conf + # The group **$LIBVIRT_GROUP** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'" From 63e1784354a49ca45bb4ae9465d2cb6dfb31db12 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Fri, 10 Jan 2014 14:23:03 +0100 Subject: [PATCH 0347/4438] Put cinder rootwrap config in separate function Separate out Cinder's rootwrap configuration so that it can be called from Grenade's upgrade scripts. 
This follows the same model as Nova uses with configure_nova_rootwrap() which can be called from Grenade to refresh rootwrap config. Change-Id: Id808abc2b5754443362b3de4b3453e305d3720f3 --- lib/cinder | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/lib/cinder b/lib/cinder index cbe732e9b0..6f5fb188c9 100644 --- a/lib/cinder +++ b/lib/cinder @@ -163,15 +163,8 @@ function cleanup_cinder() { fi } -# configure_cinder() - Set config files, create data dirs, etc -function configure_cinder() { - if [[ ! -d $CINDER_CONF_DIR ]]; then - sudo mkdir -p $CINDER_CONF_DIR - fi - sudo chown $STACK_USER $CINDER_CONF_DIR - - cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR - +# configure_cinder_rootwrap() - configure Cinder's rootwrap +function configure_cinder_rootwrap() { # Set the paths of certain binaries CINDER_ROOTWRAP=$(get_rootwrap_location cinder) if [[ ! -x $CINDER_ROOTWRAP ]]; then @@ -214,6 +207,18 @@ function configure_cinder() { chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap +} + +# configure_cinder() - Set config files, create data dirs, etc +function configure_cinder() { + if [[ ! 
-d $CINDER_CONF_DIR ]]; then + sudo mkdir -p $CINDER_CONF_DIR + fi + sudo chown $STACK_USER $CINDER_CONF_DIR + + cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR + + configure_cinder_rootwrap cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI From 9fc8792b0ac7525b4c353b0a55b8b80eabf76e2a Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 22 May 2013 17:19:06 -0500 Subject: [PATCH 0348/4438] Robustify service shutdown * Save PID when using screen in screen_it() * Add screen_stop() * Call out service stop_*() in unstack.sh functions so screen_stop() can do its thing Closes-bug: 1183449 Change-Id: Iac84231cfda960c4197de5b6e8ba6eb19225169a --- functions | 33 +++++++++++++++++++++++++++++++-- lib/ceilometer | 2 +- lib/cinder | 2 +- lib/glance | 4 ++-- lib/heat | 2 +- lib/keystone | 2 +- lib/nova | 2 +- lib/trove | 2 +- stackrc | 3 +++ unstack.sh | 47 ++++++++++++++++++++++++++++++++++------------- 10 files changed, 76 insertions(+), 23 deletions(-) diff --git a/functions b/functions index 6f09685efb..92b61ed974 100644 --- a/functions +++ b/functions @@ -1132,10 +1132,39 @@ function screen_it { sleep 1.5 NL=`echo -ne '\015'` - screen -S $SCREEN_NAME -p $1 -X stuff "$2 || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" + # This fun command does the following: + # - the passed server command is backgrounded + # - the pid of the background process is saved in the usual place + # - the server process is brought back to the foreground + # - if the server process exits prematurely the fg command errors + # and a message is written to stdout and the service failure file + # The pid saved can be used in screen_stop() as a process group + # id to kill off all child processes + screen -S $SCREEN_NAME -p $1 -X stuff "$2 & echo \$! 
>$SERVICE_DIR/$SCREEN_NAME/$1.pid; fg || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" else # Spawn directly without screen - run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$service.pid + run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid + fi + fi +} + + +# Stop a service in screen +# screen_stop service +function screen_stop() { + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + USE_SCREEN=$(trueorfalse True $USE_SCREEN) + + if is_service_enabled $1; then + # Kill via pid if we have one available + if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then + pkill -TERM -P $(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid) + rm $SERVICE_DIR/$SCREEN_NAME/$1.pid + fi + if [[ "$USE_SCREEN" = "True" ]]; then + # Clean up the screen window + screen -S $SCREEN_NAME -p $1 -X kill fi fi } diff --git a/lib/ceilometer b/lib/ceilometer index fac3be14a9..211303f57c 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -162,7 +162,7 @@ function start_ceilometer() { function stop_ceilometer() { # Kill the ceilometer screen windows for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do - screen -S $SCREEN_NAME -p $serv -X kill + screen_stop $serv done } diff --git a/lib/cinder b/lib/cinder index cbe732e9b0..11414bedd3 100644 --- a/lib/cinder +++ b/lib/cinder @@ -556,7 +556,7 @@ function start_cinder() { function stop_cinder() { # Kill the cinder screen windows for serv in c-api c-bak c-sch c-vol; do - screen -S $SCREEN_NAME -p $serv -X kill + screen_stop $serv done if is_service_enabled c-vol; then diff --git a/lib/glance b/lib/glance index 135136db7e..80868ae5c5 100644 --- a/lib/glance +++ b/lib/glance @@ -206,8 +206,8 @@ function start_glance() { # stop_glance() - Stop running processes function stop_glance() { # Kill the Glance screen windows - screen -S $SCREEN_NAME -p g-api -X kill - screen -S $SCREEN_NAME -p 
g-reg -X kill + screen_stop g-api + screen_stop g-reg } diff --git a/lib/heat b/lib/heat index e44a618162..29cd967fe1 100644 --- a/lib/heat +++ b/lib/heat @@ -175,7 +175,7 @@ function start_heat() { function stop_heat() { # Kill the screen windows for serv in h-eng h-api h-api-cfn h-api-cw; do - screen -S $SCREEN_NAME -p $serv -X kill + screen_stop $serv done } diff --git a/lib/keystone b/lib/keystone index 29b9604efe..dc6a730f16 100644 --- a/lib/keystone +++ b/lib/keystone @@ -421,7 +421,7 @@ function start_keystone() { # stop_keystone() - Stop running processes function stop_keystone() { # Kill the Keystone screen window - screen -S $SCREEN_NAME -p key -X kill + screen_stop key } diff --git a/lib/nova b/lib/nova index 39685a835a..178f8ee19c 100644 --- a/lib/nova +++ b/lib/nova @@ -705,7 +705,7 @@ function stop_nova() { # Some services are listed here twice since more than one instance # of a service may be running in certain configs. for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta; do - screen -S $SCREEN_NAME -p $serv -X kill + screen_stop $serv done if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then stop_nova_hypervisor diff --git a/lib/trove b/lib/trove index f8e3eddfe2..870afbe7bd 100644 --- a/lib/trove +++ b/lib/trove @@ -198,7 +198,7 @@ function start_trove() { function stop_trove() { # Kill the trove screen windows for serv in tr-api tr-tmgr tr-cond; do - screen -S $SCREEN_NAME -p $serv -X kill + screen_stop $serv done } diff --git a/stackrc b/stackrc index 3fdc566ed2..49fb26b2c7 100644 --- a/stackrc +++ b/stackrc @@ -9,6 +9,9 @@ DEST=/opt/stack # Destination for working data DATA_DIR=${DEST}/data +# Destination for status files +SERVICE_DIR=${DEST}/status + # Determine stack user if [[ $EUID -eq 0 ]]; then STACK_USER=stack diff --git a/unstack.sh b/unstack.sh index 67c8b7c7b1..77dbe074d2 100755 --- a/unstack.sh +++ b/unstack.sh @@ -36,6 +36,9 @@ source 
$TOP_DIR/lib/apache # Get project function libraries source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/cinder +source $TOP_DIR/lib/keystone +source $TOP_DIR/lib/glance +source $TOP_DIR/lib/nova source $TOP_DIR/lib/horizon source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron @@ -75,21 +78,29 @@ if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then teardown_neutron_debug fi -# Shut down devstack's screen to get the bulk of OpenStack services in one shot -SCREEN=$(which screen) -if [[ -n "$SCREEN" ]]; then - SESSION=$(screen -ls | awk '/[0-9].stack/ { print $1 }') - if [[ -n "$SESSION" ]]; then - screen -X -S $SESSION quit - fi +# Call service stop +if is_service_enabled trove; then + stop_trove +fi + +if is_service_enabled heat; then + stop_heat fi -# Shut down Nova hypervisor plugins after Nova -NOVA_PLUGINS=$TOP_DIR/lib/nova_plugins -if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then - # Load plugin - source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER - stop_nova_hypervisor +if is_service_enabled ceilometer; then + stop_ceilometer +fi + +if is_service_enabled nova; then + stop_nova +fi + +if is_service_enabled g-api g-reg; then + stop_glance +fi + +if is_service_enabled key; then + stop_keystone fi # Swift runs daemons @@ -123,6 +134,7 @@ SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* # Get the iSCSI volumes if is_service_enabled cinder; then + stop_cinder cleanup_cinder fi @@ -152,4 +164,13 @@ if is_service_enabled trove; then cleanup_trove fi +# Clean up the remainder of the screen processes +SCREEN=$(which screen) +if [[ -n "$SCREEN" ]]; then + SESSION=$(screen -ls | awk '/[0-9].stack/ { print $1 }') + if [[ -n "$SESSION" ]]; then + screen -X -S $SESSION quit + fi +fi + cleanup_tmp From 2bb483d32ec0876f071550a3fc755436d1661681 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 3 Jan 2014 09:41:27 -0500 Subject: [PATCH 0349/4438] clean up ubuntu versions oneiric is long dead, remove references to it whenever possible (one more subtle issue 
in cinder should be a seperate patch). This includes removing the oneiric only tool build_uec.sh. also remove the bulk of references to quantal, which is 8 months out of support. note: raring only has support for the rest of the month. Change-Id: Ib17502be7572af76dc95560615221b48b970a547 --- files/apts/cinder | 2 +- files/apts/glance | 1 - files/apts/n-cpu | 2 +- files/apts/neutron | 2 +- files/apts/tls-proxy | 2 +- lib/rpc_backend | 5 +- stack.sh | 3 +- tools/build_uec.sh | 302 ----------------------------------------- tools/get_uec_image.sh | 6 +- 9 files changed, 9 insertions(+), 316 deletions(-) delete mode 100755 tools/build_uec.sh diff --git a/files/apts/cinder b/files/apts/cinder index f8e3b6d06d..712fee99ec 100644 --- a/files/apts/cinder +++ b/files/apts/cinder @@ -4,4 +4,4 @@ qemu-utils libpq-dev python-dev open-iscsi -open-iscsi-utils # Deprecated since quantal dist:lucid,oneiric,precise +open-iscsi-utils # Deprecated since quantal dist:precise diff --git a/files/apts/glance b/files/apts/glance index 26826a53c7..22787bc5a2 100644 --- a/files/apts/glance +++ b/files/apts/glance @@ -9,7 +9,6 @@ python-dev python-eventlet python-routes python-greenlet -python-argparse # dist:oneiric python-sqlalchemy python-wsgiref python-pastedeploy diff --git a/files/apts/n-cpu b/files/apts/n-cpu index 88e0144079..29e37603b7 100644 --- a/files/apts/n-cpu +++ b/files/apts/n-cpu @@ -2,7 +2,7 @@ nbd-client lvm2 open-iscsi -open-iscsi-utils # Deprecated since quantal dist:lucid,oneiric,precise +open-iscsi-utils # Deprecated since quantal dist:precise genisoimage sysfsutils sg3-utils diff --git a/files/apts/neutron b/files/apts/neutron index 0f4b69f8ef..5760113c8c 100644 --- a/files/apts/neutron +++ b/files/apts/neutron @@ -18,7 +18,7 @@ python-mysqldb python-pyudev python-qpid # dist:precise dnsmasq-base -dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise,quantal +dnsmasq-utils # for dhcp_release only available in dist:precise rabbitmq-server # NOPRIME 
qpid # NOPRIME sqlite3 diff --git a/files/apts/tls-proxy b/files/apts/tls-proxy index 0a44015925..8fca42d124 100644 --- a/files/apts/tls-proxy +++ b/files/apts/tls-proxy @@ -1 +1 @@ -stud # only available in dist:precise,quantal +stud # only available in dist:precise diff --git a/lib/rpc_backend b/lib/rpc_backend index ae83e85e89..f59c80096f 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -192,9 +192,8 @@ function qpid_is_supported() { GetDistro fi - # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is - # not in openSUSE either right now. - ( ! ([[ "$DISTRO" = "oneiric" ]] || is_suse) ) + # Qpid is not in openSUSE + ( ! is_suse ) } diff --git a/stack.sh b/stack.sh index 7c065719c4..c303dc3927 100755 --- a/stack.sh +++ b/stack.sh @@ -131,7 +131,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f18|f19|f20|opensuse-12.2|rhel6) ]]; then +if [[ ! ${DISTRO} =~ (precise|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f18|f19|f20|opensuse-12.2|rhel6) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" @@ -1203,7 +1203,6 @@ fi # See https://help.ubuntu.com/community/CloudInit for more on cloud-init # # Override ``IMAGE_URLS`` with a comma-separated list of UEC images. 
-# * **oneiric**: http://uec-images.ubuntu.com/oneiric/current/oneiric-server-cloudimg-amd64.tar.gz # * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz if is_service_enabled g-reg; then diff --git a/tools/build_uec.sh b/tools/build_uec.sh deleted file mode 100755 index bce051a0b7..0000000000 --- a/tools/build_uec.sh +++ /dev/null @@ -1,302 +0,0 @@ -#!/usr/bin/env bash - -# **build_uec.sh** - -# Make sure that we have the proper version of ubuntu (only works on oneiric) -if ! egrep -q "oneiric" /etc/lsb-release; then - echo "This script only works with ubuntu oneiric." - exit 1 -fi - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $TOOLS_DIR/..; pwd) - -# Import common functions -. $TOP_DIR/functions - -cd $TOP_DIR - -# Source params -source ./stackrc - -# Ubuntu distro to install -DIST_NAME=${DIST_NAME:-oneiric} - -# Configure how large the VM should be -GUEST_SIZE=${GUEST_SIZE:-10G} - -# exit on error to stop unexpected errors -set -o errexit -set -o xtrace - -# Abort if localrc is not set -if [ ! -e $TOP_DIR/localrc ]; then - echo "You must have a localrc with ALL necessary passwords defined before proceeding." - echo "See stack.sh for required passwords." - exit 1 -fi - -# Install deps if needed -DEPS="kvm libvirt-bin kpartx cloud-utils curl" -apt_get install -y --force-yes $DEPS || true # allow this to fail gracefully for concurrent builds - -# Where to store files and instances -WORK_DIR=${WORK_DIR:-/opt/uecstack} - -# Where to store images -image_dir=$WORK_DIR/images/$DIST_NAME -mkdir -p $image_dir - -# Start over with a clean base image, if desired -if [ $CLEAN_BASE ]; then - rm -f $image_dir/disk -fi - -# Get the base image if it does not yet exist -if [ ! -e $image_dir/disk ]; then - $TOOLS_DIR/get_uec_image.sh -r $GUEST_SIZE $DIST_NAME $image_dir/disk $image_dir/kernel -fi - -# Copy over dev environment if COPY_ENV is set. 
-# This will also copy over your current devstack. -if [ $COPY_ENV ]; then - cd $TOOLS_DIR - ./copy_dev_environment_to_uec.sh $image_dir/disk -fi - -# Option to warm the base image with software requirements. -if [ $WARM_CACHE ]; then - cd $TOOLS_DIR - ./warm_apts_for_uec.sh $image_dir/disk -fi - -# Name of our instance, used by libvirt -GUEST_NAME=${GUEST_NAME:-devstack} - -# Mop up after previous runs -virsh destroy $GUEST_NAME || true - -# Where this vm is stored -vm_dir=$WORK_DIR/instances/$GUEST_NAME - -# Create vm dir and remove old disk -mkdir -p $vm_dir -rm -f $vm_dir/disk - -# Create a copy of the base image -qemu-img create -f qcow2 -b $image_dir/disk $vm_dir/disk - -# Back to devstack -cd $TOP_DIR - -GUEST_NETWORK=${GUEST_NETWORK:-1} -GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes} -GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50} -GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24} -GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0} -GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1} -GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"} -GUEST_RAM=${GUEST_RAM:-1524288} -GUEST_CORES=${GUEST_CORES:-1} - -# libvirt.xml configuration -NET_XML=$vm_dir/net.xml -NET_NAME=${NET_NAME:-devstack-$GUEST_NETWORK} -cat > $NET_XML < - $NET_NAME - - - - - - - - -EOF - -if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then - virsh net-destroy $NET_NAME || true - # destroying the network isn't enough to delete the leases - rm -f /var/lib/libvirt/dnsmasq/$NET_NAME.leases - virsh net-create $vm_dir/net.xml -fi - -# libvirt.xml configuration -LIBVIRT_XML=$vm_dir/libvirt.xml -cat > $LIBVIRT_XML < - $GUEST_NAME - $GUEST_RAM - - hvm - $image_dir/kernel - root=/dev/vda ro console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud-net;s=http://192.168.$GUEST_NETWORK.1:4567/ ubuntu-pass=ubuntu - - - - - - $GUEST_CORES - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -EOF - - -rm -rf $vm_dir/uec -cp -r $TOOLS_DIR/uec $vm_dir/uec - -# set metadata -cat > 
$vm_dir/uec/meta-data< $vm_dir/uec/user-data<> $vm_dir/uec/user-data< localrc < /opt/stack/.ssh/authorized_keys -chown -R $STACK_USER /opt/stack -chmod 700 /opt/stack/.ssh -chmod 600 /opt/stack/.ssh/authorized_keys - -grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || - echo "#includedir /etc/sudoers.d" >> /etc/sudoers -( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \ - > /etc/sudoers.d/50_stack_sh ) -EOF -fi - -# Run stack.sh -cat >> $vm_dir/uec/user-data< Date: Sun, 12 Jan 2014 19:35:43 +0000 Subject: [PATCH 0350/4438] Skip Nova exercises if Nova is not enabled This allows for ./exercises.sh to complete sucessfully when nova is not enabled / installed. Change-Id: If969e14f5106c15007146e8fad1da27d131828c8 --- exercises/aggregates.sh | 4 ++++ exercises/bundle.sh | 4 ++++ exercises/euca.sh | 4 ++++ exercises/floating_ips.sh | 4 ++++ exercises/sec_groups.sh | 4 ++++ 5 files changed, 20 insertions(+) diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index 1b1ac06678..d223301f35 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -43,6 +43,10 @@ source $TOP_DIR/exerciserc # Test as the admin user . $TOP_DIR/openrc admin admin +# If nova api is not enabled we exit with exitcode 55 so that +# the exercise is skipped +is_service_enabled n-api || exit 55 + # Cells does not support aggregates. 
is_service_enabled n-cell && exit 55 diff --git a/exercises/bundle.sh b/exercises/bundle.sh index b83678ab1f..5470960b91 100755 --- a/exercises/bundle.sh +++ b/exercises/bundle.sh @@ -39,6 +39,10 @@ rm -f $TOP_DIR/cacert.pem rm -f $TOP_DIR/cert.pem rm -f $TOP_DIR/pk.pem +# If nova api is not enabled we exit with exitcode 55 so that +# the exercise is skipped +is_service_enabled n-api || exit 55 + # Get Certificates nova x509-get-root-cert $TOP_DIR/cacert.pem nova x509-create-cert $TOP_DIR/pk.pem $TOP_DIR/cert.pem diff --git a/exercises/euca.sh b/exercises/euca.sh index ed521e4f7f..51b2644458 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -41,6 +41,10 @@ fi # Import exercise configuration source $TOP_DIR/exerciserc +# If nova api is not enabled we exit with exitcode 55 so that +# the exercise is skipped +is_service_enabled n-api || exit 55 + # Skip if the hypervisor is Docker [[ "$VIRT_DRIVER" == "docker" ]] && exit 55 diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 7055278f35..4ca90a5c35 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -38,6 +38,10 @@ fi # Import exercise configuration source $TOP_DIR/exerciserc +# If nova api is not enabled we exit with exitcode 55 so that +# the exercise is skipped +is_service_enabled n-api || exit 55 + # Skip if the hypervisor is Docker [[ "$VIRT_DRIVER" == "docker" ]] && exit 55 diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh index eb32cc7aa7..d71a1e0755 100755 --- a/exercises/sec_groups.sh +++ b/exercises/sec_groups.sh @@ -33,6 +33,10 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc +# If nova api is not enabled we exit with exitcode 55 so that +# the exercise is skipped +is_service_enabled n-api || exit 55 + # Skip if the hypervisor is Docker [[ "$VIRT_DRIVER" == "docker" ]] && exit 55 From 38d1f2339a88c389e4be44fc00e59f25a62fec14 Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Wed, 8 Jan 2014 09:54:13 -0500 
Subject: [PATCH 0351/4438] Add Marconi to Tempest config This patch adds queuing to tempest config, provided queuing is available in devstack. Change-Id: I2925a07d312c1f8ab2fe465f74f0bef9299eef40 Implements: blueprint add-basic-marconi-tests --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 08c0553f03..ef9dfe218b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -329,7 +329,7 @@ function configure_tempest() { iniset $TEMPEST_CONF network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}" # service_available - for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna trove; do + for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna trove marconi; do if is_service_enabled $service ; then iniset $TEMPEST_CONF service_available $service "True" else From d2bcbea5f95377043b0dcdba330501d7b81a4561 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 13 Jan 2014 11:22:41 -0600 Subject: [PATCH 0352/4438] Updates for tools/info.sh * Handle local.conf localrc section * remove blank lines * rather than removing password lines, just remove the password itself to at least show which password vars have been set Change-Id: Ieca9baaf03e53b23e336944ad0ed2581c9bee460 --- tools/info.sh | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/tools/info.sh b/tools/info.sh index 14ab8f6306..3ab7966ab4 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -85,8 +85,8 @@ done # Packages # -------- -# - We are going to check packages only for the services needed. -# - We are parsing the packages files and detecting metadatas. 
+# - Only check packages for the services enabled +# - Parse version info from the package metadata, not the package/file names for p in $(get_packages $ENABLED_SERVICES); do if [[ "$os_PACKAGE" = "deb" ]]; then @@ -141,9 +141,15 @@ rm $FREEZE_FILE # Dump localrc with 'localrc|' prepended and comments and passwords left out if [[ -r $TOP_DIR/localrc ]]; then + RC=$TOP_DIR/localrc +elif [[ -f $RC_DIR/.localrc.auto ]]; then + RC=$TOP_DIR/.localrc.auto +fi +if [[ -n $RC ]]; then sed -e ' - /PASSWORD/d; + /^[ \t]*$/d; + /PASSWORD/s/=.*$/=\/; /^#/d; s/^/localrc\|/; - ' $TOP_DIR/localrc + ' $RC fi From 279295c72c4e7028fc6eac75412b9b5f92cd630b Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Tue, 14 Jan 2014 11:37:51 +0000 Subject: [PATCH 0353/4438] Fix duplicated rootwrap.d in lib/ironic The Ironic setup of devstack is duplicating the rootwrap.d directory at /etc/ironic/rootwrap.d/rootwrap.d, this will cause the ironic-rootwrap command to fail to execute. This patch is removing the duplicated rootwrap.d directory. Change-Id: I24844c24620b5b33ad1a6acd0d872e9df11d6d89 Closes-Bug: #1268930 --- lib/ironic | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/ironic b/lib/ironic index 1ff3c81f06..afbc3e09e4 100644 --- a/lib/ironic +++ b/lib/ironic @@ -33,7 +33,6 @@ IRONIC_AUTH_CACHE_DIR=${IRONIC_AUTH_CACHE_DIR:-/var/cache/ironic} IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic} IRONIC_CONF_FILE=$IRONIC_CONF_DIR/ironic.conf IRONIC_ROOTWRAP_CONF=$IRONIC_CONF_DIR/rootwrap.conf -IRONIC_ROOTWRAP_FILTERS=$IRONIC_CONF_DIR/rootwrap.d IRONIC_POLICY_JSON=$IRONIC_CONF_DIR/policy.json # Support entry points installation of console scripts @@ -118,7 +117,7 @@ function configure_ironic_api() { # Sets conductor specific settings. 
function configure_ironic_conductor() { cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF - cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_ROOTWRAP_FILTERS + cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR iniset $IRONIC_CONF DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF } From ef1e08022b9553b07757005e7a5103fbdc0d99f0 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Thu, 2 Jan 2014 16:33:53 -0800 Subject: [PATCH 0354/4438] Add sanity check framework to verify neutron server/backend integration Some Neutron plugins require controllers and multiple backend services to operate correctly. This patch adds the framework for third party plugins to run sanity checks after Neutron Server has started. This simple addition may reveal potential configuration pitfalls much earlier in the dev/test cycle, thus speeding up the build churn process. The first plugin that uses this framework is the VMware NSX one. Closes-bug: #1265671 Change-Id: I17f9c5c8e828316ff03f0eff42ae4ae6c6c58733 --- lib/neutron | 5 +++++ lib/neutron_thirdparty/README.md | 3 +++ lib/neutron_thirdparty/bigswitch_floodlight | 4 ++++ lib/neutron_thirdparty/midonet | 4 ++++ lib/neutron_thirdparty/ryu | 4 ++++ lib/neutron_thirdparty/trema | 4 ++++ lib/neutron_thirdparty/vmware_nsx | 4 ++++ stack.sh | 1 + 8 files changed, 29 insertions(+) diff --git a/lib/neutron b/lib/neutron index 43f43f951a..81faa103b5 100644 --- a/lib/neutron +++ b/lib/neutron @@ -958,6 +958,11 @@ function stop_neutron_third_party() { _neutron_third_party_do stop } +# check_neutron_third_party_integration() - Check that third party integration is sane +function check_neutron_third_party_integration() { + _neutron_third_party_do check +} + # Restore xtrace $XTRACE diff --git a/lib/neutron_thirdparty/README.md b/lib/neutron_thirdparty/README.md index b289f58c5d..2460e5cac7 100644 --- a/lib/neutron_thirdparty/README.md +++ b/lib/neutron_thirdparty/README.md @@ -34,3 +34,6 @@ functions to be implemented * ``stop_``: stop 
running processes (non-screen) + +* ``check_``: + verify that the integration between neutron server and third-party components is sane diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight index ebde0673b8..1fd4fd801a 100644 --- a/lib/neutron_thirdparty/bigswitch_floodlight +++ b/lib/neutron_thirdparty/bigswitch_floodlight @@ -45,5 +45,9 @@ function stop_bigswitch_floodlight() { : } +function check_bigswitch_floodlight() { + : +} + # Restore xtrace $MY_XTRACE diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet index 7928bca31f..e672528a2d 100644 --- a/lib/neutron_thirdparty/midonet +++ b/lib/neutron_thirdparty/midonet @@ -56,5 +56,9 @@ function stop_midonet() { : } +function check_midonet() { + : +} + # Restore xtrace $MY_XTRACE diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu index 3b825a10c1..5edf273361 100644 --- a/lib/neutron_thirdparty/ryu +++ b/lib/neutron_thirdparty/ryu @@ -75,5 +75,9 @@ function stop_ryu() { : } +function check_ryu() { + : +} + # Restore xtrace $MY_XTRACE diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema index bdc23568fb..2b125646dc 100644 --- a/lib/neutron_thirdparty/trema +++ b/lib/neutron_thirdparty/trema @@ -109,5 +109,9 @@ function stop_trema() { sudo TREMA_TMP=$TREMA_TMP_DIR trema killall } +function check_trema() { + : +} + # Restore xtrace $MY_XTRACE diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx index 70d348274f..7c6202723f 100644 --- a/lib/neutron_thirdparty/vmware_nsx +++ b/lib/neutron_thirdparty/vmware_nsx @@ -78,5 +78,9 @@ function stop_vmware_nsx() { done } +function check_vmware_nsx() { + : +} + # Restore xtrace $MY_XTRACE diff --git a/stack.sh b/stack.sh index 7da41a98c8..621a058444 100755 --- a/stack.sh +++ b/stack.sh @@ -1116,6 +1116,7 @@ fi if is_service_enabled q-svc; then echo_summary "Starting Neutron" start_neutron_service_and_check + 
check_neutron_third_party_integration elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then NM_CONF=${NOVA_CONF} if is_service_enabled n-cell; then From 5eec5b6b80401842ad1f7275d9c7a6949cc6f848 Mon Sep 17 00:00:00 2001 From: Gordon Chung Date: Tue, 14 Jan 2014 11:05:31 -0500 Subject: [PATCH 0355/4438] command not found errors on unstack - add lib/ceilometer and lib/heat to source list for when stop_heat and stop_ceilometer functions are called. - add lib/tls source to lib/keystone for when is_ssl_enabled_service function called. Change-Id: Ief05766e9cfda71fb6392c8a757d04751283414e Closes-Bug: #1269047 --- lib/keystone | 1 + unstack.sh | 2 ++ 2 files changed, 3 insertions(+) diff --git a/lib/keystone b/lib/keystone index a7e5d66808..ceefe6a144 100644 --- a/lib/keystone +++ b/lib/keystone @@ -28,6 +28,7 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace +source $TOP_DIR/lib/tls # Defaults # -------- diff --git a/unstack.sh b/unstack.sh index 77dbe074d2..4445f1fb31 100755 --- a/unstack.sh +++ b/unstack.sh @@ -35,10 +35,12 @@ source $TOP_DIR/lib/apache # Get project function libraries source $TOP_DIR/lib/baremetal +source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/cinder source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova +source $TOP_DIR/lib/heat source $TOP_DIR/lib/horizon source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron From 52a7b6ecbad11c08dcd77a6fcd8bfef6a20324a9 Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Tue, 14 Jan 2014 18:52:51 +0100 Subject: [PATCH 0356/4438] Run neutron-debug with admin tenant in neutron-adv-test Because neutron-debug create-probe needs admin role only, demo tenants cannot create ports. neutron-debug is wrapped in order to run it only with admin tenant. 
Change-Id: Ib65e8639858c597345c6a5fdc0192b40f34a0300 Closes-Bug: #1269090 --- exercises/neutron-adv-test.sh | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index 0c0d42f458..1343f11553 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -185,6 +185,14 @@ function confirm_server_active { fi } +function neutron_debug_admin { + local os_username=$OS_USERNAME + local os_tenant_id=$OS_TENANT_ID + source $TOP_DIR/openrc admin admin + neutron-debug $@ + source $TOP_DIR/openrc $os_username $os_tenant_id +} + function add_tenant { local TENANT=$1 local USER=$2 @@ -241,7 +249,7 @@ function create_network { local NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA" neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR - neutron-debug probe-create --device-owner compute $NET_ID + neutron_debug_admin probe-create --device-owner compute $NET_ID source $TOP_DIR/openrc demo demo } From 55d9b9a9517ebe8c37f82136ff5eb7b781929325 Mon Sep 17 00:00:00 2001 From: Shiv Haris Date: Tue, 14 Jan 2014 11:33:28 -0800 Subject: [PATCH 0357/4438] Fix typo NEUTON to NEUTRON Fixes bug: #1269111 Change-Id: Icf66b4d474698b5f3ca22bc656ecd12d03164bce --- lib/neutron_plugins/brocade | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade index f9275cacc2..8e18d04984 100644 --- a/lib/neutron_plugins/brocade +++ b/lib/neutron_plugins/brocade @@ -38,7 +38,7 @@ function neutron_plugin_configure_l3_agent() { } function neutron_plugin_configure_plugin_agent() { - AGENT_BINARY="$NEUTON_BIN_DIR/neutron-linuxbridge-agent" + AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent" } function neutron_plugin_setup_interface_driver() { From 
b4a215cce2c649ce811893f5e57b7ee6c55158e8 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Fri, 10 Jan 2014 16:39:32 +0900 Subject: [PATCH 0358/4438] Sanitize language settings To avoid commands bailing out with "unsupported locale setting" errors. Change-Id: I54ae4cd84a0a4b4875533181b1d96563a1604775 --- stack.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/stack.sh b/stack.sh index 7da41a98c8..c52514413c 100755 --- a/stack.sh +++ b/stack.sh @@ -23,6 +23,13 @@ # Make sure custom grep options don't get in the way unset GREP_OPTIONS +# Sanitize language settings to avoid commands bailing out +# with "unsupported locale setting" errors. +unset LANG +unset LANGUAGE +LC_ALL=C +export LC_ALL + # Keep track of the devstack directory TOP_DIR=$(cd $(dirname "$0") && pwd) From d5a5460888869eb22cc6f2622c3adbf492680971 Mon Sep 17 00:00:00 2001 From: Steven Dake Date: Wed, 15 Jan 2014 10:56:51 -0700 Subject: [PATCH 0359/4438] Revert "Change the libvirtd log level to DEBUG" Suggested by Daniel Berrange in this thread: http://lists.openstack.org/pipermail/openstack-dev/2014-January/024407.html This reverts commit 3bd85c9d6e257fc952cb3c6d0c09e199685bd5ed. Change-Id: I370ba61cf8a00b51684cd504fed4ba4078d868be --- lib/nova_plugins/hypervisor-libvirt | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index ef40e7ab4c..6f90f4ac17 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -93,9 +93,6 @@ EOF" fi fi - # Change the libvirtd log level to DEBUG. - sudo sed -i s/"#log_level = 3"/"log_level = 1"/ /etc/libvirt/libvirtd.conf - # The user that nova runs as needs to be member of **libvirtd** group otherwise # nova-compute will be unable to use libvirt. if ! 
getent group $LIBVIRT_GROUP >/dev/null; then From 2394605a635c86c9a90f683f1f3a3ee718d17d5f Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 15 Jan 2014 21:42:32 +0000 Subject: [PATCH 0360/4438] Typo: funstions=>functions Change-Id: I59caf62b049d09450ce3236648cf1ede2f48e7f5 --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 43f43f951a..5dc5703f3c 100644 --- a/lib/neutron +++ b/lib/neutron @@ -1,5 +1,5 @@ # lib/neutron -# functions - funstions specific to neutron +# functions - functions specific to neutron # Dependencies: # ``functions`` file From 14daa57d67fed6dc98b833f4c3698fef8ff7f312 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 15 Jan 2014 21:43:25 +0000 Subject: [PATCH 0361/4438] Remove old DEFAULT.root_helper setting root_helper is now under the agent group and not DEFAULT Change-Id: I11867f7ceff1f3b8b0bc2ef8aa508b6ecee653fc --- lib/neutron | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/neutron b/lib/neutron index 43f43f951a..fd61d140d3 100644 --- a/lib/neutron +++ b/lib/neutron @@ -611,9 +611,6 @@ function _configure_neutron_debug_command() { iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT verbose False iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT debug False iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND" - # Intermediate fix until Neutron patch lands and then line above will - # be cleaned. iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper "$Q_RR_COMMAND" _neutron_setup_interface_driver $NEUTRON_TEST_CONFIG_FILE From fe4c4f7a9e6d1a4f26c67b8e1609fc5e80c5ef83 Mon Sep 17 00:00:00 2001 From: john-griffith Date: Wed, 15 Jan 2014 11:24:03 -0700 Subject: [PATCH 0362/4438] Update cinder cert script to use run_tempest Changes to tempest run_tests.sh (commit: 17520e49a7e69b3817856a739121a1fb2906f2cc) breaks the cinder_driver_cert script. 
A backward compatible run_tempest.sh script was added, so for now we should update the cinder_driver_cert script to use that Change-Id: I611a01dd4788ae01da8a6167a530f9e44733dfc6 Closes-Bug: #1269531 --- driver_certs/cinder_driver_cert.sh | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh index 18bef8b3b5..edcc6d4800 100755 --- a/driver_certs/cinder_driver_cert.sh +++ b/driver_certs/cinder_driver_cert.sh @@ -2,6 +2,22 @@ # **cinder_cert.sh** +# This script is a simple wrapper around the tempest volume api tests +# It requires that you have a working and functional devstack install +# and that you've enabled your device driver by making the necessary +# modifications to /etc/cinder/cinder.conf + +# This script will refresh your openstack repo's and restart the cinder +# services to pick up your driver changes. +# please NOTE; this script assumes your devstack install is functional +# and includes tempest. A good first step is to make sure you can +# create volumes on your device before you even try and run this script. + +# It also assumes default install location (/opt/stack/xxx) +# to aid in debug, you should also verify that you've added +# an output directory for screen logs: +# SCREEN_LOGDIR=/opt/stack/screen-logs + CERT_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=$(cd $CERT_DIR/..; pwd) @@ -73,9 +89,9 @@ start_cinder sleep 5 # run tempest api/volume/test_* -log_message "Run the actual tempest volume tests (run_tests.sh -N tempest.api.volume.test_*)...", True +log_message "Run the actual tempest volume tests (./tools/pretty_tox.sh api.volume_*)...", True exec 2> >(tee -a $TEMPFILE) -`./run_tests.sh -N tempest.api.volume.test_*` +`./tools/pretty_tox.sh api.volume` if [[ $? = 0 ]]; then log_message "CONGRATULATIONS!!! 
Device driver PASSED!", True log_message "Submit output: ($TEMPFILE)" From a0a23311c3c40f631663468e1ba45d5e84790019 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 15 Jan 2014 15:24:30 -0500 Subject: [PATCH 0363/4438] updated sar options to collect more data in order to have better data on the load state of the test nodes we should track things beyond just cpu time. Add in load time, process creation rates, and io rates during the tests. also add a sar filter that makes it report on one line reading sar input with multiple flags is somewhat problematic, because it's tons of interspersed headers. So build something with does a pivot filter to make it possible to get this all on one line. Change-Id: I8f085cedda65dfc37ad530eb97ba1fc5577314c3 --- stack.sh | 12 +++++-- tools/sar_filter.py | 82 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+), 3 deletions(-) create mode 100755 tools/sar_filter.py diff --git a/stack.sh b/stack.sh index 7da41a98c8..382b75e7fc 100755 --- a/stack.sh +++ b/stack.sh @@ -860,11 +860,17 @@ init_service_check # ------- # If enabled, systat has to start early to track OpenStack service startup. 
-if is_service_enabled sysstat;then +if is_service_enabled sysstat; then + # what we want to measure + # -u : cpu statitics + # -q : load + # -b : io load rates + # -w : process creation and context switch rates + SYSSTAT_OPTS="-u -q -b -w" if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it sysstat "cd ; sar -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" + screen_it sysstat "cd $TOP_DIR; ./tools/sar_filter.py $SYSSTAT_OPTS -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" else - screen_it sysstat "sar $SYSSTAT_INTERVAL" + screen_it sysstat "./tools/sar_filter.py $SYSSTAT_OPTS $SYSSTAT_INTERVAL" fi fi diff --git a/tools/sar_filter.py b/tools/sar_filter.py new file mode 100755 index 0000000000..ed8c19687c --- /dev/null +++ b/tools/sar_filter.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python +# +# Copyright 2014 Samsung Electronics Corp. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import re +import subprocess +import sys + + +def is_data_line(line): + timestamp, data = parse_line(line) + return re.search('\d\.d', data) + + +def parse_line(line): + m = re.search('(\d\d:\d\d:\d\d \w\w)(\s+((\S+)\s*)+)', line) + if m: + date = m.group(1) + data = m.group(2).rstrip() + return date, data + else: + return None, None + + +process = subprocess.Popen( + "sar %s" % " ".join(sys.argv[1:]), + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + +# Poll process for new output until finished + +start_time = "" +header = "" +data_line = "" +printed_header = False +current_ts = None +while True: + nextline = process.stdout.readline() + if nextline == '' and process.poll() is not None: + break + + date, data = parse_line(nextline) + # stop until we get to the first set of real lines + if not date: + continue + + # now we eat the header lines, and only print out the header + # if we've never seen them before + if not start_time: + start_time = date + header += "%s %s" % (date, data) + elif date == start_time: + header += " %s" % data + elif not printed_header: + printed_header = True + print header + + # now we know this is a data line, printing out if the timestamp + # has changed, and stacking up otherwise. + nextline = process.stdout.readline() + date, data = parse_line(nextline) + if date != current_ts: + current_ts = date + print data_line + data_line = "%s %s" % (date, data) + else: + data_line += " %s" % data + + sys.stdout.flush() From 0049c0c434b4672963b6622486c6c638259bdfda Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Thu, 16 Jan 2014 18:16:48 -0600 Subject: [PATCH 0364/4438] Make unstack.sh more like stack.sh unstack.sh and stack.sh both have to "configure projects", but the code was different. This change makes it so the 2 sections of the files are the same. 
Change-Id: Ia06f8bbfbe2a6e87fb406e34e13a39bd7fa9e5af --- lib/keystone | 2 -- stack.sh | 6 +++++- unstack.sh | 23 +++++++++++++++++------ 3 files changed, 22 insertions(+), 9 deletions(-) diff --git a/lib/keystone b/lib/keystone index ceefe6a144..71ac668ce5 100644 --- a/lib/keystone +++ b/lib/keystone @@ -28,8 +28,6 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace -source $TOP_DIR/lib/tls - # Defaults # -------- diff --git a/stack.sh b/stack.sh index 7da41a98c8..50a4cd2af9 100755 --- a/stack.sh +++ b/stack.sh @@ -305,9 +305,13 @@ rm -f $SSL_BUNDLE_FILE # Configure Projects # ================== -# Source project function libraries +# Import apache functions source $TOP_DIR/lib/apache + +# Import TLS functions source $TOP_DIR/lib/tls + +# Source project function libraries source $TOP_DIR/lib/infra source $TOP_DIR/lib/oslo source $TOP_DIR/lib/stackforge diff --git a/unstack.sh b/unstack.sh index 4445f1fb31..31f6f01c8f 100755 --- a/unstack.sh +++ b/unstack.sh @@ -30,20 +30,31 @@ if [[ $EUID -eq 0 ]]; then exit 1 fi + +# Configure Projects +# ================== + # Import apache functions source $TOP_DIR/lib/apache -# Get project function libraries -source $TOP_DIR/lib/baremetal -source $TOP_DIR/lib/ceilometer -source $TOP_DIR/lib/cinder +# Import TLS functions +source $TOP_DIR/lib/tls + +# Source project function libraries +source $TOP_DIR/lib/infra +source $TOP_DIR/lib/oslo +source $TOP_DIR/lib/stackforge +source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova -source $TOP_DIR/lib/heat -source $TOP_DIR/lib/horizon +source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift +source $TOP_DIR/lib/ceilometer +source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron +source $TOP_DIR/lib/baremetal +source $TOP_DIR/lib/ldap source $TOP_DIR/lib/ironic source $TOP_DIR/lib/trove From 04f6dc24a7845ee139977fa5b0c5e53aad8e99bd Mon Sep 17 00:00:00 2001 From: Emilien Macchi Date: Thu, 16 Jan 2014 18:03:38 -0500 Subject: [PATCH 0365/4438] Fix 
stop_neutron metadata agent function Currently, stop_neutron fails in Jenkins because it kills itself. This patch ensure we kill only neutron metadata agent, and not the awk process in itself. Change-Id: I25d1d90e002fa9eb3c5bc366cc74cb70a2daa69f Closes-bug: #1269982 Signed-off-by: Emilien Macchi --- lib/neutron | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index 43f43f951a..a909b8b81c 100644 --- a/lib/neutron +++ b/lib/neutron @@ -505,8 +505,7 @@ function stop_neutron() { [ ! -z "$pid" ] && sudo kill -9 $pid fi if is_service_enabled q-meta; then - pid=$(ps aux | awk '/neutron-ns-metadata-proxy/ { print $2 }') - [ ! -z "$pid" ] && sudo kill -9 $pid + pkill -9 -f neutron-ns-metadata-proxy fi if is_service_enabled q-lbaas; then From 39d500335ad2bff0ffdf1d543d0d7528b3812480 Mon Sep 17 00:00:00 2001 From: Ana Krivokapic Date: Mon, 6 Jan 2014 21:46:35 +0100 Subject: [PATCH 0366/4438] Add missing mongodb client package on Fedora On Fedora, when ceilometer is enabled and mongodb is used as backend, devstack installation would fail due to missing mongodb client package. This patch ensures the package gets installed. 
Change-Id: I981bb55f86541e5ff19c52160269a7789b94423f --- files/rpms/ceilometer-collector | 1 + lib/ceilometer | 2 ++ 2 files changed, 3 insertions(+) diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector index d7b7ea89c1..c91bac36a2 100644 --- a/files/rpms/ceilometer-collector +++ b/files/rpms/ceilometer-collector @@ -1,3 +1,4 @@ selinux-policy-targeted mongodb-server pymongo +mongodb # NOPRIME diff --git a/lib/ceilometer b/lib/ceilometer index 75058c05a5..d0f00c07eb 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -151,6 +151,8 @@ function configure_ceilometer() { function configure_mongodb() { if is_fedora; then + # install mongodb client + install_package mongodb # ensure smallfiles selected to minimize freespace requirements sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod From 9acb965e572d672f1d5632ee92768b4708b03fbd Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Sun, 19 Jan 2014 11:05:08 +1300 Subject: [PATCH 0367/4438] Do not set bind_host for heat APIs This results in the APIs binding to 0.0.0.0 which is what other devstack services bind to anyway. 
Change-Id: Ic229dbed02b224fe7c5e14f20998bb5d5987aa39 Closes-Bug: #1172991 --- lib/heat | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/heat b/lib/heat index e35305b843..0307c64ae1 100644 --- a/lib/heat +++ b/lib/heat @@ -110,15 +110,12 @@ function configure_heat() { [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_CONF paste_deploy flavor standalone # OpenStack API - iniset $HEAT_CONF heat_api bind_host $HEAT_API_HOST iniset $HEAT_CONF heat_api bind_port $HEAT_API_PORT # Cloudformation API - iniset $HEAT_CONF heat_api_cfn bind_host $HEAT_API_CFN_HOST iniset $HEAT_CONF heat_api_cfn bind_port $HEAT_API_CFN_PORT # Cloudwatch API - iniset $HEAT_CONF heat_api_cloudwatch bind_host $HEAT_API_CW_HOST iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT # heat environment From cf903938eceb0188c9ecd405e6c89b63b1c8910d Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Mon, 20 Jan 2014 18:18:58 +0100 Subject: [PATCH 0368/4438] Added missing sudo when killing ns-metadata Closes-bug: #1269982 Change-Id: Ib6b641a8d5c92fb4a8aaed6b5d7b63e66acd6bd9 --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 465b57cc35..4b280d1d53 100644 --- a/lib/neutron +++ b/lib/neutron @@ -505,7 +505,7 @@ function stop_neutron() { [ ! -z "$pid" ] && sudo kill -9 $pid fi if is_service_enabled q-meta; then - pkill -9 -f neutron-ns-metadata-proxy + sudo pkill -9 neutron-ns-metadata-proxy || : fi if is_service_enabled q-lbaas; then From c75c78ad5d0473bc97bf859810ddfc18bf270aa2 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Tue, 21 Jan 2014 15:01:01 +0000 Subject: [PATCH 0369/4438] Add xenserver image By adding a separate entry for xenserver, it will enforce the gate to cache cirros-0.3.0-x86_64-disk.vhd.tgz. 
Change-Id: Ibfd4618e98f079a53fc286f5e95f18a3d658e4d2 --- stackrc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stackrc b/stackrc index 49fb26b2c7..8a0280ecfa 100644 --- a/stackrc +++ b/stackrc @@ -284,6 +284,9 @@ case "$VIRT_DRIVER" in vsphere) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-debian-2.6.32-i686} IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/debian-2.6.32-i686.vmdk"};; + xenserver) + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-disk} + IMAGE_URLS=${IMAGE_URLS:-"https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz"};; *) # Default to Cirros with kernel, ramdisk and disk image DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.1-x86_64-uec} IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};; From e7a94efe77bf6738fcb778f36cf18ceb82a0fae6 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 21 Jan 2014 13:17:24 -0500 Subject: [PATCH 0370/4438] disable client side libvirt debug logging and tune server side libvirt logging to the values that danpb suggested would be useful on the openstack-dev mailing list. 
Change-Id: I4b1c780d1dd4d2eecc81fabe42c07cc2a9e0e3f4 --- lib/nova | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/nova b/lib/nova index a50878950c..a4edb53cf8 100644 --- a/lib/nova +++ b/lib/nova @@ -650,12 +650,11 @@ function start_nova_compute() { if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then # Enable client side traces for libvirt - export LIBVIRT_LOG_FILTERS="1:libvirt" - export LIBVIRT_LOG_OUTPUTS="1:file:/var/log/libvirt/libvirtd-nova.log" - + local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" + local log_outputs="1:file:/var/log/libvirt/libvirtd.log" # Enable server side traces for libvirtd - echo "log_filters=\"1:libvirt 1:qemu\"" | sudo tee -a /etc/libvirt/libvirtd.conf - echo "log_outputs=\"1:file:/var/log/libvirt/libvirtd.log\"" | sudo tee -a /etc/libvirt/libvirtd.conf + echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf + echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf # The group **$LIBVIRT_GROUP** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. From 1b6ba540887ab73432488f5d81339227052c423c Mon Sep 17 00:00:00 2001 From: ZhiQiang Fan Date: Wed, 22 Jan 2014 22:56:59 +0800 Subject: [PATCH 0371/4438] Remove unnecessary slash from ceilometer endpoint The last slash in ceilometer endpoint is not needed, it should be removed because it will generate redundant slash which has been treated as a bug in ceilometer. 
Change-Id: Ifcff9b63921f5b1dda667d8e77aab22ca2928a8b Closes-Bug: #1271556 ref: https://review.openstack.org/#/c/63279/ --- lib/ceilometer | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 75058c05a5..18f146eb90 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -85,9 +85,9 @@ create_ceilometer_accounts() { keystone endpoint-create \ --region RegionOne \ --service_id $CEILOMETER_SERVICE \ - --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ - --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ - --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" + --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \ + --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \ + --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" fi fi } From 4968d1ad5d8d6b0537c68548eb5f8c08bc33f63a Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Wed, 22 Jan 2014 19:06:44 -0600 Subject: [PATCH 0372/4438] Keystone use common logging setup The Keystone setup was using logging.conf to configure logging, unlike other projects. This may have been left over from before Keystone switched to oslo logging. Switching to common logging configuration allows: - Common format for logs for easier parsing - Pretty colorized logs - Keystone can control the default logging levels for libraries that are used by setting the defaults in keystone. - Potentially using a function to setup logging for all components using oslo-logging (e.g., share with lib/nova). 
Change-Id: I4e9b1e6cffce30f16a1e039224312852b8abda07 Closes-Bug: #1271775 Closes-Bug: #1269987 --- lib/keystone | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/keystone b/lib/keystone index ceefe6a144..7f0bcf24a7 100644 --- a/lib/keystone +++ b/lib/keystone @@ -247,14 +247,14 @@ function configure_keystone() { fi # Set up logging - LOGGING_ROOT="devel" if [ "$SYSLOG" != "False" ]; then - LOGGING_ROOT="$LOGGING_ROOT,production" + iniset $KEYSTONE_CONF DEFAULT use_syslog "True" + fi + + # Format logging + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + setup_colorized_logging $KEYSTONE_CONF DEFAULT fi - KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_CONF_DIR/logging.conf" - cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf - iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG" - iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production" if is_apache_enabled_service key; then _config_keystone_apache_wsgi @@ -412,7 +412,7 @@ function start_keystone() { screen_it key "cd $KEYSTONE_DIR && sudo tail -f /var/log/$APACHE_NAME/keystone" else # Start Keystone in a screen window - screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG --debug" + screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF --debug" fi echo "Waiting for keystone to start..." From 0b8f6e0fbba961de04d95ff0e06e515d1ea3ea8b Mon Sep 17 00:00:00 2001 From: IWAMOTO Toshihiro Date: Thu, 23 Jan 2014 12:02:34 +0900 Subject: [PATCH 0373/4438] Make sure not to revert local changes. "git diff --quiet" has a bug ignoring local changes if there's a unchanged file with a newer timestamp. This patch works around the bug. 
Change-Id: I0ddc24e0f7af21287c43c1e04dd166ebff6f2dca Closes-Bug: 1264422 --- functions | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 92b61ed974..eb92a6c615 100644 --- a/functions +++ b/functions @@ -1301,7 +1301,8 @@ function setup_develop() { echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" # Don't update repo if local changes exist - (cd $project_dir && git diff --quiet) + # Don't use buggy "git diff --quiet" + (cd $project_dir && git diff --exit-code >/dev/null) local update_requirements=$? if [ $update_requirements -eq 0 ]; then From ab491bcc88acd83e9fa21de1d4a3fe60bfba577a Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Tue, 21 Jan 2014 11:18:11 +0900 Subject: [PATCH 0374/4438] Add get-pip.py/*.qcow2 to .gitignore files/get-pip.py and *.qcow2 are installed by DevStack itself. So we shouldn't manage it with the git repository. Change-Id: Ib22ed814d3d3eb33ef3ff45874b0ff36b2036cf5 --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 43652024f3..1840352636 100644 --- a/.gitignore +++ b/.gitignore @@ -7,8 +7,10 @@ src localrc local.sh files/*.gz +files/*.qcow2 files/images files/pip-* +files/get-pip.py stack-screenrc *.pem accrc From 55c468c422ae7bc48f46847d6fa21e53d4673259 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Thu, 23 Jan 2014 15:01:50 +0400 Subject: [PATCH 0375/4438] Use DATABASE/connection opt for db url in Savanna DATABASE/sql_connection opt is now deprecated. 
Change-Id: I58058f0d51e16de53e6472c8c01065438d709edc --- lib/savanna | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/savanna b/lib/savanna index bb4dfe693d..57d8ac39ce 100644 --- a/lib/savanna +++ b/lib/savanna @@ -96,8 +96,7 @@ function configure_savanna() { iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG recreate_database savanna utf8 - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database sql_connection `database_connection_url savanna` - inicomment $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database connection + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database connection `database_connection_url savanna` if is_service_enabled neutron; then iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_neutron true From fe42255bfac23a74890c2c7d8cfef385428cef32 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Thu, 23 Jan 2014 14:18:54 +0400 Subject: [PATCH 0376/4438] Use savanna-db-manage to init db for Savanna It uses alembic migrations to initialize database. 
Change-Id: I6cf01f69c6bc7c9e403040607dd397cfc3b574a4 --- lib/savanna | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/savanna b/lib/savanna index 57d8ac39ce..c7d59f79c4 100644 --- a/lib/savanna +++ b/lib/savanna @@ -95,7 +95,6 @@ function configure_savanna() { iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG - recreate_database savanna utf8 iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database connection `database_connection_url savanna` if is_service_enabled neutron; then @@ -104,6 +103,9 @@ function configure_savanna() { fi iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG + + recreate_database savanna utf8 + $SAVANNA_BIN_DIR/savanna-db-manage --config-file $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE upgrade head } # install_savanna() - Collect source and prepare From 579af5d6786f62008807a473749600e88cea21fc Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 23 Jan 2014 11:32:22 -0600 Subject: [PATCH 0377/4438] Kill process groups in screen_stop() Previously only the top child process was killed, killing the process group also takes all of the child processes with it. 
Closes-bug: 1271889 Change-Id: If1864cc4f1944f417ea3473d81d8b6e8e40030c2 --- functions | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 92b61ed974..13d021e147 100644 --- a/functions +++ b/functions @@ -1150,6 +1150,9 @@ function screen_it { # Stop a service in screen +# If a PID is available use it, kill the whole process group via TERM +# If screen is being used kill the screen window; this will catch processes +# that did not leave a PID behind # screen_stop service function screen_stop() { SCREEN_NAME=${SCREEN_NAME:-stack} @@ -1159,7 +1162,7 @@ function screen_stop() { if is_service_enabled $1; then # Kill via pid if we have one available if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then - pkill -TERM -P $(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid) + pkill -TERM -P -$(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid) rm $SERVICE_DIR/$SCREEN_NAME/$1.pid fi if [[ "$USE_SCREEN" = "True" ]]; then From c3e5b77b45068ed07e53fdda1276f5c863de5973 Mon Sep 17 00:00:00 2001 From: Flavio Percoco Date: Thu, 23 Jan 2014 13:48:16 +0100 Subject: [PATCH 0378/4438] Add missing file argument to iniset_multiline Change Id9aab356b36b2150312324a0349d120bbbbd4e63 introduced a call to iniset_multiline to enable swift stores explicitly. However, the call has a missing file argument which resulted in this call setting the values to the wrong file, section and param. This patch fixes that. 
Change-Id: Ib17048e05c467bc8ca2c13fe4297d6bac6c8a880 --- lib/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index 21c1fa595a..55d5fb37ec 100644 --- a/lib/glance +++ b/lib/glance @@ -125,7 +125,7 @@ function configure_glance() { iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True - iniset_multiline DEFAULT known_stores glance.store.filesystem.Store glance.store.http.Store glance.store.swift.Store + iniset $GLANCE_API_CONF DEFAULT known_stores "glance.store.filesystem.Store, glance.store.http.Store, glance.store.swift.Store" fi cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI From bdeadf59d4273515df0f47edb820ff159bbc5380 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Thu, 23 Jan 2014 17:41:18 +0000 Subject: [PATCH 0379/4438] Add pidstat support pidstat is a script that comes from sysstat, but will give us per-process information. Allow enabling "pidstat" that will run pidstat to give info every 5 seconds by default. 
Change-Id: I5ec7d5abce81125b55985bba3ccaf8073ccdfa2a --- stack.sh | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/stack.sh b/stack.sh index efdee64b34..1d02c16ff8 100755 --- a/stack.sh +++ b/stack.sh @@ -291,6 +291,9 @@ SYSLOG_PORT=${SYSLOG_PORT:-516} SYSSTAT_FILE=${SYSSTAT_FILE:-"sysstat.dat"} SYSSTAT_INTERVAL=${SYSSTAT_INTERVAL:-"1"} +PIDSTAT_FILE=${PIDSTAT_FILE:-"pidstat.txt"} +PIDSTAT_INTERVAL=${PIDSTAT_INTERVAL:-"5"} + # Use color for logging output (only available if syslog is not used) LOG_COLOR=`trueorfalse True $LOG_COLOR` @@ -874,6 +877,16 @@ if is_service_enabled sysstat; then fi fi +if is_service_enabled pidstat; then + # Per-process stats + PIDSTAT_OPTS="-l -p ALL -T ALL" + if [[ -n ${SCREEN_LOGDIR} ]]; then + screen_it pidstat "cd $TOP_DIR; pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL > $SCREEN_LOGDIR/$PIDSTAT_FILE" + else + screen_it pidstat "pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL" + fi +fi + # Start Services # ============== From b93cd643432d3633c48bec02fcd7cb4f354f67ed Mon Sep 17 00:00:00 2001 From: Arnaud Legendre Date: Thu, 23 Jan 2014 17:12:21 -0800 Subject: [PATCH 0380/4438] upload_image.sh should parse filenames correctly The upload_image script gives the ability to the user to provide specific metadata using the filename: file-adapter_type;disk_type;network_type.vmdk Currently, the regex expects each of these types to be populated. This patch fixes this issue by making the regex more flexible and accepts only one of these metadata to be populated. Change-Id: If74cb06cc640864e7e91fd88943cdb37e05935d6 Closes-Bug: #1272126 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 92b61ed974..276cea1e04 100644 --- a/functions +++ b/functions @@ -1539,7 +1539,7 @@ function upload_image() { # NOTE: For backwards compatibility reasons, colons may be used in place # of semi-colons for property delimiters but they are not permitted # characters in NTFS filesystems. 
- property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).+[:;].+[:;].+$'` + property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$'` IFS=':;' read -a props <<< "$property_string" vmdk_disktype="${props[0]:-$vmdk_disktype}" vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}" From ab0595e9cd8f9bc77a3bb7e6c9611c2c771b0781 Mon Sep 17 00:00:00 2001 From: Gordon Chung Date: Thu, 16 Jan 2014 09:44:57 -0500 Subject: [PATCH 0381/4438] ERRORs in ceilometer-acentral log after succesful tempest run recent merge added duplicate creation of ceilometer user. remove ceilometer user creation from keystone_data so we can correctly add ResellerAdmin role to ceilometer user which it needs to interact with swift Change-Id: I043c6b9337dfb147c3c8f364b462708a4030b41c Closes-Bug: #1268730 --- files/keystone_data.sh | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 07b6b601d2..d477c42906 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -6,7 +6,6 @@ # ------------------------------------------------------------------ # service glance admin # service heat service # if enabled -# service ceilometer admin # if enabled # Tempest Only: # alt_demo alt_demo Member # @@ -113,30 +112,11 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then fi # Ceilometer -if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then - keystone user-create --name=ceilometer \ - --pass="$SERVICE_PASSWORD" \ - --tenant $SERVICE_TENANT_NAME \ - --email=ceilometer@example.com - keystone user-role-add --tenant $SERVICE_TENANT_NAME \ - --user ceilometer \ - --role admin +if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then # Ceilometer needs ResellerAdmin role to access swift account stats. 
keystone user-role-add --tenant $SERVICE_TENANT_NAME \ --user ceilometer \ --role ResellerAdmin - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - keystone service-create \ - --name=ceilometer \ - --type=metering \ - --description="Ceilometer Service" - keystone endpoint-create \ - --region RegionOne \ - --service ceilometer \ - --publicurl "http://$SERVICE_HOST:8777" \ - --adminurl "http://$SERVICE_HOST:8777" \ - --internalurl "http://$SERVICE_HOST:8777" - fi fi # EC2 From dc4dc7f03335e26ea3d86b6184f0475cc5f3d51b Mon Sep 17 00:00:00 2001 From: john-griffith Date: Wed, 22 Jan 2014 18:09:32 -0700 Subject: [PATCH 0382/4438] Fix up tempest conf settings The tempest api.volume.test_volume_types test won't work with non-default drivers configured for cinder's backend any more. The reason is that we create a type using capability scheduler keywords in the extra-specs for the test; (vendor_name and storage_protocol). The result is the extra-spec uses the filters: "vendor_name=Open Source" and "storage_protocol=iSCSI", but for example if you have another backend say SolidFire, EMC, NetApp, IBM etc the capabilities filter will fail the create with a "No valid host available". This is intended to work by simply setting these values in your tempest.conf file. That's fine, however upon setting this up in my localrc I found that the tempest config variables being set via devtsack were never picked up Currently devstack doesn't use the same variable names for configuration variables as tempest expects. Devstack is using the variable "TEMPEST_CONF" however the Tempest project is expecting the variable "TEMPEST_CONFIG", so currently the devstack lib/tempest rc variables are never picked up by tempest properly. This change modifes devstack's naming of TEMPEST_CONF, my though being that since this doesn't work in devstack currently that changing it here would be better than changing it in Tempest where it's possible people had their own custoizations already outside of devstack. 
In addition this change creates rc variables in devstack to actually set these via devstack. The idea here is that Cinder 3'rd party testing needs to be a simple devstack config and run stack.sh. By fixing up the configuration file variable naming and adding the variables for the vendor and protocol settings that's now possible. An example localrc for a custom config is shown below. The example sets the tempest config file to /etc/tempest/tempest.conf, and configures tempest to use the SolidFire driver as the cinder backend. TEMPEST_VOLUME_VENDOR ==> tempest.conf.volume_vendor TEMPEST_STORAGE_PROTOCOL ==> tempest.conf.storage_protocol relevant example localrc entries: TEMPEST_CONFIG=/etc/tempest/tempest.conf TEMPEST_CONFIG_DIR=/etc/tempest TEMPEST_VOLUME_DRIVER=solidfire TEMPEST_VOLUME_VENDOR="SolidFire Inc" ***NOTE*** storage_protocol and vendor_name MUST match what the backend device reports from get capabilities. Change-Id: I28dfa90c877b27f5d4919f2748fae092bb2f87fa Closes-Bug: 1271781 --- lib/tempest | 141 +++++++++++++++++++++++++++++----------------------- 1 file changed, 78 insertions(+), 63 deletions(-) diff --git a/lib/tempest b/lib/tempest index ef9dfe218b..a13cf10e84 100644 --- a/lib/tempest +++ b/lib/tempest @@ -46,8 +46,8 @@ set +o xtrace # Set up default directories TEMPEST_DIR=$DEST/tempest -TEMPEST_CONF_DIR=$TEMPEST_DIR/etc -TEMPEST_CONF=$TEMPEST_CONF_DIR/tempest.conf +TEMPEST_CONFIG_DIR=${TEMPEST_CONFIG_DIR:-$TEMPEST_DIR/etc} +TEMPEST_CONFIG=$TEMPEST_CONFIG_DIR/tempest.conf TEMPEST_STATE_PATH=${TEMPEST_STATE_PATH:=$DATA_DIR/tempest} NOVA_SOURCE_DIR=$DEST/nova @@ -58,6 +58,10 @@ BUILD_TIMEOUT=196 BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-0.3.1" +# Cinder/Volume variables +TEMPEST_VOLUME_DRIVER=${TEMPEST_VOLUME_DRIVER:-default} +TEMPEST_VOLUME_VENDOR=${TEMPEST_VOLUME_VENDOR:-"Open Source"} +TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-iSCSI} # Functions # --------- @@ -83,6 +87,11 @@ function configure_tempest() { local 
boto_instance_type="m1.tiny" local ssh_connect_method="fixed" + if [[ ! -d $TEMPEST_CONFIG_DIR ]]; then + sudo mkdir -p $TEMPEST_CONFIG_DIR + fi + sudo chown $STACK_USER $TEMPEST_CONFIG_DIR + # TODO(afazekas): # sudo python setup.py deploy @@ -133,7 +142,8 @@ function configure_tempest() { # Create tempest.conf from tempest.conf.sample # copy every time, because the image UUIDS are going to change - cp $TEMPEST_CONF.sample $TEMPEST_CONF + sudo cp $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG + sudo chmod 644 $TEMPEST_CONFIG password=${ADMIN_PASSWORD:-secrete} @@ -224,121 +234,126 @@ function configure_tempest() { fi # Oslo - iniset $TEMPEST_CONF DEFAULT lock_path $TEMPEST_STATE_PATH + iniset $TEMPEST_CONFIG DEFAULT lock_path $TEMPEST_STATE_PATH mkdir -p $TEMPEST_STATE_PATH - iniset $TEMPEST_CONF DEFAULT use_stderr False - iniset $TEMPEST_CONF DEFAULT log_file tempest.log - iniset $TEMPEST_CONF DEFAULT debug True + iniset $TEMPEST_CONFIG DEFAULT use_stderr False + iniset $TEMPEST_CONFIG DEFAULT log_file tempest.log + iniset $TEMPEST_CONFIG DEFAULT debug True # Timeouts - iniset $TEMPEST_CONF compute build_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONF volume build_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONF boto build_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONF compute build_interval $BUILD_INTERVAL - iniset $TEMPEST_CONF volume build_interval $BUILD_INTERVAL - iniset $TEMPEST_CONF boto build_interval $BUILD_INTERVAL - iniset $TEMPEST_CONF boto http_socket_timeout 5 + iniset $TEMPEST_CONFIG compute build_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONFIG volume build_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONFIG boto build_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONFIG compute build_interval $BUILD_INTERVAL + iniset $TEMPEST_CONFIG volume build_interval $BUILD_INTERVAL + iniset $TEMPEST_CONFIG boto build_interval $BUILD_INTERVAL + iniset $TEMPEST_CONFIG boto http_socket_timeout 5 # Identity - iniset $TEMPEST_CONF identity uri 
"$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/" - iniset $TEMPEST_CONF identity password "$password" - iniset $TEMPEST_CONF identity alt_username $ALT_USERNAME - iniset $TEMPEST_CONF identity alt_password "$password" - iniset $TEMPEST_CONF identity alt_tenant_name $ALT_TENANT_NAME - iniset $TEMPEST_CONF identity admin_password "$password" + iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/" + iniset $TEMPEST_CONFIG identity password "$password" + iniset $TEMPEST_CONFIG identity alt_username $ALT_USERNAME + iniset $TEMPEST_CONFIG identity alt_password "$password" + iniset $TEMPEST_CONFIG identity alt_tenant_name $ALT_TENANT_NAME + iniset $TEMPEST_CONFIG identity admin_password "$password" # Image # for the gate we want to be able to override this variable so we aren't # doing an HTTP fetch over the wide internet for this test if [[ ! -z "$TEMPEST_HTTP_IMAGE" ]]; then - iniset $TEMPEST_CONF image http_image $TEMPEST_HTTP_IMAGE + iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE fi # Compute - iniset $TEMPEST_CONF compute change_password_available False + iniset $TEMPEST_CONFIG compute change_password_available False # Note(nati) current tempest don't create network for each tenant # so reuse same tenant for now if is_service_enabled neutron; then TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-False} fi - iniset $TEMPEST_CONF compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} - iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED - iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME - iniset $TEMPEST_CONF compute ip_version_for_ssh 4 - iniset $TEMPEST_CONF compute ssh_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONF compute image_ref $image_uuid - iniset $TEMPEST_CONF compute image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} - iniset $TEMPEST_CONF compute image_ref_alt $image_uuid_alt - iniset $TEMPEST_CONF compute 
image_alt_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} - iniset $TEMPEST_CONF compute flavor_ref $flavor_ref - iniset $TEMPEST_CONF compute flavor_ref_alt $flavor_ref_alt - iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} - iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} - iniset $TEMPEST_CONF compute ssh_connect_method $ssh_connect_method + iniset $TEMPEST_CONFIG compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} + iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED + iniset $TEMPEST_CONFIG compute network_for_ssh $PRIVATE_NETWORK_NAME + iniset $TEMPEST_CONFIG compute ip_version_for_ssh 4 + iniset $TEMPEST_CONFIG compute ssh_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONFIG compute image_ref $image_uuid + iniset $TEMPEST_CONFIG compute image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} + iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid_alt + iniset $TEMPEST_CONFIG compute image_alt_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} + iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref + iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt + iniset $TEMPEST_CONFIG compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} + iniset $TEMPEST_CONFIG compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} + iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method # Compute admin - iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED + iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED - iniset $TEMPEST_CONF network api_version 2.0 - iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable" - iniset $TEMPEST_CONF network public_network_id "$public_network_id" - iniset $TEMPEST_CONF network public_router_id "$public_router_id" - iniset $TEMPEST_CONF network default_network 
"$FIXED_RANGE" + iniset $TEMPEST_CONFIG network api_version 2.0 + iniset $TEMPEST_CONFIG network tenant_networks_reachable "$tenant_networks_reachable" + iniset $TEMPEST_CONFIG network public_network_id "$public_network_id" + iniset $TEMPEST_CONFIG network public_router_id "$public_router_id" + iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE" # boto - iniset $TEMPEST_CONF boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" - iniset $TEMPEST_CONF boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}" - iniset $TEMPEST_CONF boto s3_materials_path "$BOTO_MATERIALS_PATH" - iniset $TEMPEST_CONF boto instance_type "$boto_instance_type" - iniset $TEMPEST_CONF boto http_socket_timeout 30 - iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} + iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" + iniset $TEMPEST_CONFIG boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}" + iniset $TEMPEST_CONFIG boto s3_materials_path "$BOTO_MATERIALS_PATH" + iniset $TEMPEST_CONFIG boto instance_type "$boto_instance_type" + iniset $TEMPEST_CONFIG boto http_socket_timeout 30 + iniset $TEMPEST_CONFIG boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # Orchestration test image if [[ ! 
-z "$HEAT_FETCHED_TEST_IMAGE" ]]; then - iniset $TEMPEST_CONF orchestration image_ref "$HEAT_FETCHED_TEST_IMAGE" + iniset $TEMPEST_CONFIG orchestration image_ref "$HEAT_FETCHED_TEST_IMAGE" elif [[ "$HEAT_CREATE_TEST_IMAGE" = "True" ]]; then disk_image_create /usr/share/tripleo-image-elements "vm fedora heat-cfntools" "i386" "fedora-vm-heat-cfntools-tempest" - iniset $TEMPEST_CONF orchestration image_ref "fedora-vm-heat-cfntools-tempest" + iniset $TEMPEST_CONFIG orchestration image_ref "fedora-vm-heat-cfntools-tempest" fi # Scenario - iniset $TEMPEST_CONF scenario img_dir "$FILES/images/cirros-0.3.1-x86_64-uec" + iniset $TEMPEST_CONFIG scenario img_dir "$FILES/images/cirros-0.3.1-x86_64-uec" # Large Ops Number - iniset $TEMPEST_CONF scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} + iniset $TEMPEST_CONFIG scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} # Volume if is_service_enabled c-bak; then - iniset $TEMPEST_CONF volume volume_backup_enabled "True" + iniset $TEMPEST_CONFIG volume volume_backup_enabled "True" fi CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) if [ $CINDER_MULTI_LVM_BACKEND == "True" ]; then - iniset $TEMPEST_CONF volume multi_backend_enabled "True" - iniset $TEMPEST_CONF volume backend1_name "LVM_iSCSI" - iniset $TEMPEST_CONF volume backend2_name "LVM_iSCSI_2" + iniset $TEMPEST_CONFIG volume multi_backend_enabled "True" + iniset $TEMPEST_CONFIG volume backend1_name "LVM_iSCSI" + iniset $TEMPEST_CONFIG volume backend2_name "LVM_iSCSI_2" + fi + + if [ $TEMPEST_VOLUME_DRIVER != "default" ]; then + iniset $TEMPEST_CONFIG volume vendor_name $TEMPEST_VOLUME_VENDOR + iniset $TEMPEST_CONFIG volume storage_protocol $TEMPEST_STORAGE_PROTOCOL fi # Dashboard - iniset $TEMPEST_CONF dashboard dashboard_url "http://$SERVICE_HOST/" - iniset $TEMPEST_CONF dashboard login_url "http://$SERVICE_HOST/auth/login/" + iniset $TEMPEST_CONFIG dashboard dashboard_url "http://$SERVICE_HOST/" + iniset $TEMPEST_CONFIG dashboard 
login_url "http://$SERVICE_HOST/auth/login/" # cli - iniset $TEMPEST_CONF cli cli_dir $NOVA_BIN_DIR + iniset $TEMPEST_CONFIG cli cli_dir $NOVA_BIN_DIR # Networking - iniset $TEMPEST_CONF network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}" + iniset $TEMPEST_CONFIG network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}" # service_available for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna trove marconi; do if is_service_enabled $service ; then - iniset $TEMPEST_CONF service_available $service "True" + iniset $TEMPEST_CONFIG service_available $service "True" else - iniset $TEMPEST_CONF service_available $service "False" + iniset $TEMPEST_CONFIG service_available $service "False" fi done echo "Created tempest configuration file:" - cat $TEMPEST_CONF + cat $TEMPEST_CONFIG # Restore IFS IFS=$ifs From db20cd5436ec6301b134f2d92053cb98fb15717b Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Sun, 5 Jan 2014 07:41:30 -0800 Subject: [PATCH 0383/4438] Add Neutron/NSX plugin sanity check Supports-blueprint: nvp-third-party-support (aka bp vmware-nsx-third-party) Related-bug: #1265671 Change-Id: Ifa4e1d36b8735e81f24b8852103a9c433d736e84 --- lib/neutron_thirdparty/vmware_nsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx index 7c6202723f..4eb177a458 100644 --- a/lib/neutron_thirdparty/vmware_nsx +++ b/lib/neutron_thirdparty/vmware_nsx @@ -79,7 +79,7 @@ function stop_vmware_nsx() { } function check_vmware_nsx() { - : + neutron-check-nsx-config $NEUTRON_CONF_DIR/plugins/vmware/nsx.ini } # Restore xtrace From 53ffc713b1d352a9ecf701b452e8e6659daf9748 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 17 Dec 2013 11:13:40 -0600 Subject: [PATCH 0384/4438] clean.sh updates * Clean out data, log and state dirs * Include lib/apache to clear is_apache_enabled_service not found error * Clean errors removing tgt config 
files * Clean errors removing VG backing file in lib/cinder Change-Id: I33dfde17eb8daaaed7f7e76337fe6a8085a266bf --- clean.sh | 26 ++++++++++++++++---------- lib/cinder | 4 ++-- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/clean.sh b/clean.sh index 480a81214f..e16bdb7f36 100755 --- a/clean.sh +++ b/clean.sh @@ -30,13 +30,17 @@ fi # and ``DISTRO`` GetDistro +# Import apache functions +source $TOP_DIR/lib/apache +source $TOP_DIR/lib/ldap # Import database library source $TOP_DIR/lib/database source $TOP_DIR/lib/rpc_backend -source $TOP_DIR/lib/oslo source $TOP_DIR/lib/tls + +source $TOP_DIR/lib/oslo source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance @@ -47,7 +51,9 @@ source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal -source $TOP_DIR/lib/ldap +source $TOP_DIR/lib/ironic +source $TOP_DIR/lib/trove + # Extras Source # -------------- @@ -95,13 +101,6 @@ if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then cleanup_nova_hypervisor fi -# cinder doesn't always clean up the volume group as it might be used elsewhere... -# clean it up if it is a loop device -VG_DEV=$(sudo losetup -j $DATA_DIR/${VOLUME_GROUP}-backing-file | awk -F':' '/backing-file/ { print $1}') -if [[ -n "$VG_DEV" ]]; then - sudo losetup -d $VG_DEV -fi - #if mount | grep $DATA_DIR/swift/drives; then # sudo umount $DATA_DIR/swift/drives/sdb1 #fi @@ -111,12 +110,19 @@ fi sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift # Clean out tgt -sudo rm /etc/tgt/conf.d/* +sudo rm -f /etc/tgt/conf.d/* # Clean up the message queue cleanup_rpc_backend cleanup_database +# Clean out data, logs and status +LOGDIR=$(dirname "$LOGFILE") +sudo rm -rf $DATA_DIR $LOGDIR $DEST/status +if [[ -n "$SCREEN_LOGDIR" ]] && [[ -d "$SCREEN_LOGDIR" ]]; then + sudo rm -rf $SCREEN_LOGDIR +fi + # Clean up networking... # should this be in nova? 
# FIXED_IP_ADDR in br100 diff --git a/lib/cinder b/lib/cinder index cbe732e9b0..45a9a25dc8 100644 --- a/lib/cinder +++ b/lib/cinder @@ -109,8 +109,8 @@ function _clean_lvm_backing_file() { # of the backing file if [ -z "`sudo lvs --noheadings -o lv_name $vg`" ]; then # if the backing physical device is a loop device, it was probably setup by devstack - VG_DEV=$(sudo losetup -j $DATA_DIR/${vg}-backing-file | awk -F':' '/backing-file/ { print $1}') - if [[ -n "$VG_DEV" ]]; then + if [[ -n "$VG_DEV" ]] && [[ -e "$VG_DEV" ]]; then + VG_DEV=$(sudo losetup -j $DATA_DIR/${vg}-backing-file | awk -F':' '/backing-file/ { print $1}') sudo losetup -d $VG_DEV rm -f $DATA_DIR/${vg}-backing-file fi From 38e38fb16d5d597e41c486812ae7ba480696b31c Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 10 Jan 2014 12:05:51 -0600 Subject: [PATCH 0385/4438] Update samples * Skip commands for services that are not started in local.sh * Rename localrc to local.conf Change-Id: Ida3a8cc836d56db94da4a133fbeb81c7f5fc5f26 --- samples/{localrc => local.conf} | 13 ++++--- samples/local.sh | 60 +++++++++++++++++---------------- 2 files changed, 39 insertions(+), 34 deletions(-) rename samples/{localrc => local.conf} (87%) diff --git a/samples/localrc b/samples/local.conf similarity index 87% rename from samples/localrc rename to samples/local.conf index 80cf0e75ac..c8126c22af 100644 --- a/samples/localrc +++ b/samples/local.conf @@ -1,19 +1,22 @@ -# Sample ``localrc`` for user-configurable variables in ``stack.sh`` +# Sample ``local.conf`` for user-configurable variables in ``stack.sh`` # NOTE: Copy this file to the root ``devstack`` directory for it to # work properly. -# ``localrc`` is a user-maintained setings file that is sourced from ``stackrc``. +# ``local.conf`` is a user-maintained setings file that is sourced from ``stackrc``. # This gives it the ability to override any variables set in ``stackrc``. 
# Also, most of the settings in ``stack.sh`` are written to only be set if no -# value has already been set; this lets ``localrc`` effectively override the +# value has already been set; this lets ``local.conf`` effectively override the # default values. # This is a collection of some of the settings we have found to be useful # in our DevStack development environments. Additional settings are described -# in http://devstack.org/localrc.html +# in http://devstack.org/local.conf.html # These should be considered as samples and are unsupported DevStack code. +# The ``localrc`` section replaces the old ``localrc`` configuration file. +# Note that if ``localrc`` is present it will be used in favor of this section. +[[local|localrc]] # Minimal Contents # ---------------- @@ -22,7 +25,7 @@ # there are a few minimal variables set: # If the ``*_PASSWORD`` variables are not set here you will be prompted to enter -# values for them by ``stack.sh`` and they will be added to ``localrc``. +# values for them by ``stack.sh`` and they will be added to ``local.conf``. 
ADMIN_PASSWORD=nomoresecrete MYSQL_PASSWORD=stackdb RABBIT_PASSWORD=stackqueue diff --git a/samples/local.sh b/samples/local.sh index 970cbb97e0..664cb663fe 100755 --- a/samples/local.sh +++ b/samples/local.sh @@ -23,45 +23,47 @@ source $TOP_DIR/stackrc # Destination path for installation ``DEST`` DEST=${DEST:-/opt/stack} +if is_service_enabled nova; then -# Import ssh keys -# --------------- + # Import ssh keys + # --------------- -# Import keys from the current user into the default OpenStack user (usually -# ``demo``) + # Import keys from the current user into the default OpenStack user (usually + # ``demo``) -# Get OpenStack auth -source $TOP_DIR/openrc + # Get OpenStack user auth + source $TOP_DIR/openrc -# Add first keypair found in localhost:$HOME/.ssh -for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do - if [[ -r $i ]]; then - nova keypair-add --pub_key=$i `hostname` - break - fi -done + # Add first keypair found in localhost:$HOME/.ssh + for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do + if [[ -r $i ]]; then + nova keypair-add --pub_key=$i `hostname` + break + fi + done -# Create A Flavor -# --------------- + # Create A Flavor + # --------------- -# Get OpenStack admin auth -source $TOP_DIR/openrc admin admin + # Get OpenStack admin auth + source $TOP_DIR/openrc admin admin -# Name of new flavor -# set in ``localrc`` with ``DEFAULT_INSTANCE_TYPE=m1.micro`` -MI_NAME=m1.micro + # Name of new flavor + # set in ``localrc`` with ``DEFAULT_INSTANCE_TYPE=m1.micro`` + MI_NAME=m1.micro -# Create micro flavor if not present -if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then - nova flavor-create $MI_NAME 6 128 0 1 -fi + # Create micro flavor if not present + if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then + nova flavor-create $MI_NAME 6 128 0 1 + fi -# Other Uses -# ---------- + # Other Uses + # ---------- -# Add tcp/22 and icmp to default security group -nova secgroup-add-rule default tcp 22 22 0.0.0.0/0 -nova secgroup-add-rule default icmp 
-1 -1 0.0.0.0/0 + # Add tcp/22 and icmp to default security group + nova secgroup-add-rule default tcp 22 22 0.0.0.0/0 + nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0 +fi From fbe12f988cd1026b2f074a5b5bfe15ff19171b90 Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Sun, 26 Jan 2014 22:57:47 +0900 Subject: [PATCH 0386/4438] Add 'bc' to files/apts/general After commit def4c141, "bc" command is used in stack.sh, but 'bc' command is not available in very minimal Ubuntu installation (without any tasks installed). We need to add 'bc' to required package list. Closes-Bug: #1272914 Change-Id: I5797707e8eaa9dd2a21d1a1fc3af028d1951a2ee --- files/apts/general | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apts/general b/files/apts/general index aff687fab4..32d31f0642 100644 --- a/files/apts/general +++ b/files/apts/general @@ -21,3 +21,4 @@ euca2ools # only for testing client tar python-cmd2 # dist:precise python2.7 +bc From c38d864cfb43592a4985441cc5c3de89d572c32e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Sun, 26 Jan 2014 13:01:30 -0500 Subject: [PATCH 0387/4438] remove setting up fatal_deprecations using fatal deprecations only means you can't add new deprecations to the code base, which isn't helpful in actually deprecating features in a user friendly way. 
Change-Id: I26468f4c221a14f2eea746439d46e5fa192cfc57 --- lib/nova | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/nova b/lib/nova index a4edb53cf8..b85f0941f3 100644 --- a/lib/nova +++ b/lib/nova @@ -379,7 +379,6 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT metadata_workers "4" iniset $NOVA_CONF conductor workers "4" iniset $NOVA_CONF DEFAULT sql_connection `database_connection_url nova` - iniset $NOVA_CONF DEFAULT fatal_deprecations "True" iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF osapi_v3 enabled "True" From e61bc61a31ba05c9af5d0801d2f120e919e0bd5f Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Mon, 27 Jan 2014 15:21:29 +1300 Subject: [PATCH 0388/4438] Use HOST_IP instead of SERVICE_HOST for heat API conf Heat config values heat_metadata_server_url, heat_waitcondition_server_url and heat_waitcondition_server_url currently derive their host from devstack SERVICE_HOST. In gating this is set to 127.0.0.1, which would explain why nova servers are not reaching heat with waitcondition signalling. This change uses HOST_IP as the default instead of SERVICE_HOST. 
Change-Id: I373b086e3a36a3484cfd34f0d1c8c168ac6d465d --- lib/heat | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/heat b/lib/heat index 0307c64ae1..b9b8aa66ca 100644 --- a/lib/heat +++ b/lib/heat @@ -60,13 +60,13 @@ function configure_heat() { # remove old config files rm -f $HEAT_CONF_DIR/heat-*.conf - HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$SERVICE_HOST} + HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$HOST_IP} HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000} HEAT_ENGINE_HOST=${HEAT_ENGINE_HOST:-$SERVICE_HOST} HEAT_ENGINE_PORT=${HEAT_ENGINE_PORT:-8001} - HEAT_API_CW_HOST=${HEAT_API_CW_HOST:-$SERVICE_HOST} + HEAT_API_CW_HOST=${HEAT_API_CW_HOST:-$HOST_IP} HEAT_API_CW_PORT=${HEAT_API_CW_PORT:-8003} - HEAT_API_HOST=${HEAT_API_HOST:-$SERVICE_HOST} + HEAT_API_HOST=${HEAT_API_HOST:-$HOST_IP} HEAT_API_PORT=${HEAT_API_PORT:-8004} HEAT_API_PASTE_FILE=$HEAT_CONF_DIR/api-paste.ini HEAT_POLICY_FILE=$HEAT_CONF_DIR/policy.json From daa9a734e2fe008a32ed0f98501e2ce2f80167c8 Mon Sep 17 00:00:00 2001 From: Hirofumi Ichihara Date: Mon, 27 Jan 2014 14:54:02 +0900 Subject: [PATCH 0389/4438] Repeatedly add log_filters,log_outputs to libvirtd.conf when restart Change-Id: I14f07f3164f9201305ed1e94e9277a5a5792e850 Closes-bug: 1273058 --- lib/nova | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index b85f0941f3..dbaa3f53d9 100644 --- a/lib/nova +++ b/lib/nova @@ -652,8 +652,12 @@ function start_nova_compute() { local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" local log_outputs="1:file:/var/log/libvirt/libvirtd.log" # Enable server side traces for libvirtd - echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf - echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf + if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then + echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + if ! 
grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then + echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi # The group **$LIBVIRT_GROUP** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. From 315f7b0747effbd490ff3b25d85bc6399ed290a1 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 27 Jan 2014 09:40:29 +0100 Subject: [PATCH 0390/4438] Use service postgresql initdb with el6 postgresql-setup does not exists on el6, the service postgresql initdb is the documented db init command. Change-Id: I2b92a3c8e7db603eb13378e46893fc81f507405b --- lib/databases/postgresql | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 60e5a33715..c459feb9e0 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -43,7 +43,13 @@ function configure_database_postgresql { if is_fedora; then PG_HBA=/var/lib/pgsql/data/pg_hba.conf PG_CONF=/var/lib/pgsql/data/postgresql.conf - sudo [ -e $PG_HBA ] || sudo postgresql-setup initdb + if ! sudo [ -e $PG_HBA ]; then + if ! [[ $DISTRO =~ (rhel6) ]]; then + sudo postgresql-setup initdb + else + sudo service postgresql initdb + fi + fi elif is_ubuntu; then PG_DIR=`find /etc/postgresql -name pg_hba.conf|xargs dirname` PG_HBA=$PG_DIR/pg_hba.conf From e7b6399d455ea3f44c46448449cc90d55356f23e Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 27 Jan 2014 11:44:03 +0100 Subject: [PATCH 0391/4438] Install bc with all distribution After commit def4c141 the bc is requred for devstack install on minimal image, commit fbe12f98 fixed the issue with ubuntu, but not with other distribution. Adding bc to the files/rpms-suse/general and files/rpms/general. 
Change-Id: Ieb2e3e2af454bca03bb3d7565ff731dc357e699f --- files/rpms-suse/general | 1 + files/rpms/general | 1 + 2 files changed, 2 insertions(+) diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 98c279581e..704947ea53 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -17,6 +17,7 @@ tcpdump unzip vim-enhanced wget +bc findutils-locate # useful when debugging lsof # useful when debugging diff --git a/files/rpms/general b/files/rpms/general index 40246ea4ab..6cfe31eaf1 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -21,6 +21,7 @@ tcpdump unzip wget which +bc # [1] : some of installed tools have unversioned dependencies on this, # but others have versioned (<=0.7). So if a later version (0.7.1) From d8416d7c1c71c82fa9c0f0e7a6518ce043bff120 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 27 Jan 2014 15:36:06 -0500 Subject: [PATCH 0392/4438] allow for upgrade of the precise kernel we are getting kernel crashes in the OpenStack gate, to test getting around this we'd like devstack to be able to upgrade the precise kernel to the latest lts backported kernel. default to off Change-Id: I5d47aa8d15b1b1c0386a13b65022f6b8108c5c49 --- tools/fixup_stuff.sh | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 5fb47dc29b..a28e10ef2d 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -69,6 +69,22 @@ if [[ -d $dir ]]; then sudo chmod +r $dir/* fi +# Ubuntu 12.04 +# ----- +# We can regularly get kernel crashes on the 12.04 default kernel, so attempt +# to install a new kernel +if [[ ${DISTRO} =~ (precise) ]]; then + # Finally, because we suspect the Precise kernel is problematic, install a new kernel + UPGRADE_KERNEL=$(trueorfalse False $UPGRADE_KERNEL) + if [[ $UPGRADE_KERNEL == "True" ]]; then + if [[ ! 
`uname -r` =~ (^3\.11) ]]; then + apt_get install linux-generic-lts-saucy + echo "Installing Saucy LTS kernel, please reboot before proceeding" + exit 1 + fi + fi +fi + # RHEL6 # ----- From bb8227ce69b9b040b98dbe339e4f5c02172d19ac Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 27 Jan 2014 12:21:52 -0600 Subject: [PATCH 0393/4438] Fix Swift process kill stop_swift() was not killing all swift processes properly. Change to manually clean up all screen services with pkill. Closes-bug: 1268794 Change-Id: Ibb7a2e0dd10a313609f05963264087f82f6f00e2 --- lib/swift | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index 44c230be93..37b630c3fa 100644 --- a/lib/swift +++ b/lib/swift @@ -652,8 +652,10 @@ function stop_swift() { if type -p swift-init >/dev/null; then swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true fi - # Dump the proxy server - sudo pkill -f swift-proxy-server + for type in proxy object container account; do + # Dump all of the servers + pkill -f swift- + done } # Restore xtrace From fc744f9713fcccfebeb52e35c7fc1ce955b89200 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 27 Jan 2014 13:45:21 -0600 Subject: [PATCH 0394/4438] Convert trove to plugin Also adds an is_trove_enabled() function to match https://review.openstack.org/69497 changes for is_service_enabled(). 
Change-Id: Ic0408ff6d9816aec8a3506931470470342a5dcd7 --- extras.d/70-trove | 33 +++++++++++++++++++++++++++++++++ lib/trove | 10 ++++++++++ stack.sh | 26 +------------------------- unstack.sh | 4 ---- 4 files changed, 44 insertions(+), 29 deletions(-) create mode 100644 extras.d/70-trove diff --git a/extras.d/70-trove b/extras.d/70-trove new file mode 100644 index 0000000000..a4dc7fbc5b --- /dev/null +++ b/extras.d/70-trove @@ -0,0 +1,33 @@ +# trove.sh - Devstack extras script to install Trove + +if is_service_enabled trove; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/trove + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing Trove" + install_trove + install_troveclient + cleanup_trove + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + echo_summary "Configuring Trove" + configure_troveclient + configure_trove + + if is_service_enabled key; then + create_trove_accounts + fi + + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + # Initialize trove + init_trove + + # Start the trove API and trove taskmgr components + echo_summary "Starting Trove" + start_trove + fi + + if [[ "$1" == "unstack" ]]; then + stop_trove + fi +fi diff --git a/lib/trove b/lib/trove index 8e817f5145..9c91024211 100644 --- a/lib/trove +++ b/lib/trove @@ -38,6 +38,16 @@ else TROVE_BIN_DIR=$(get_python_exec_prefix) fi +# Functions +# --------- + +# Test if any Trove services are enabled +# is_trove_enabled +function is_trove_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"tr-" ]] && return 0 + return 1 +} + # setup_trove_logging() - Adds logging configuration to conf files function setup_trove_logging() { local CONF=$1 diff --git a/stack.sh b/stack.sh index a2469f1868..45d47c819c 100755 --- a/stack.sh +++ b/stack.sh @@ -3,7 +3,7 @@ # ``stack.sh`` is an opinionated OpenStack developer installation. 
It # installs and configures various combinations of **Ceilometer**, **Cinder**, # **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron**, -# **Swift**, and **Trove** +# and **Swift** # This script allows you to specify configuration options of what git # repositories to use, enabled services, network configuration and various @@ -337,7 +337,6 @@ source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap source $TOP_DIR/lib/ironic -source $TOP_DIR/lib/trove # Extras Source # -------------- @@ -739,12 +738,6 @@ if is_service_enabled heat; then configure_heat fi -if is_service_enabled trove; then - install_trove - install_troveclient - cleanup_trove -fi - if is_service_enabled tls-proxy; then configure_CA init_CA @@ -927,10 +920,6 @@ if is_service_enabled key; then create_cinder_accounts create_neutron_accounts - if is_service_enabled trove; then - create_trove_accounts - fi - if is_service_enabled ceilometer; then create_ceilometer_accounts fi @@ -1204,19 +1193,6 @@ if is_service_enabled heat; then start_heat fi -# Configure and launch the trove service api, and taskmanager -if is_service_enabled trove; then - # Initialize trove - echo_summary "Configuring Trove" - configure_troveclient - configure_trove - init_trove - - # Start the trove API and trove taskmgr components - echo_summary "Starting Trove" - start_trove -fi - # Create account rc files # ======================= diff --git a/unstack.sh b/unstack.sh index 31f6f01c8f..92d0642c38 100755 --- a/unstack.sh +++ b/unstack.sh @@ -56,7 +56,6 @@ source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap source $TOP_DIR/lib/ironic -source $TOP_DIR/lib/trove # Extras Source # -------------- @@ -92,9 +91,6 @@ if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then fi # Call service stop -if is_service_enabled trove; then - stop_trove -fi if is_service_enabled heat; then stop_heat From abb7df152328fd83924070c4c40843847fb6d87a Mon Sep 17 00:00:00 2001 From: Sergey 
Lukjanov Date: Tue, 28 Jan 2014 22:38:06 +0400 Subject: [PATCH 0395/4438] Include SAVANNA_CONF_DIR into SAVANNA_CONF_FILE It's the commom way of using X_CONF_FILE variable. Change-Id: Ibc284be44ffdd25be3191913c78424cbf06b2bb0 --- lib/savanna | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/lib/savanna b/lib/savanna index c7d59f79c4..de2044318b 100644 --- a/lib/savanna +++ b/lib/savanna @@ -26,7 +26,7 @@ SAVANNA_BRANCH=${SAVANNA_BRANCH:-master} # Set up default directories SAVANNA_DIR=$DEST/savanna SAVANNA_CONF_DIR=${SAVANNA_CONF_DIR:-/etc/savanna} -SAVANNA_CONF_FILE=savanna.conf +SAVANNA_CONF_FILE=${SAVANNA_CONF_DIR}/savanna.conf SAVANNA_DEBUG=${SAVANNA_DEBUG:-True} SAVANNA_SERVICE_HOST=${SAVANNA_SERVICE_HOST:-$SERVICE_HOST} @@ -88,24 +88,24 @@ function configure_savanna() { sudo chown $STACK_USER $SAVANNA_CONF_DIR # Copy over savanna configuration file and configure common parameters. - cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE + cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_FILE - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_username savanna - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG + iniset $SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD + iniset $SAVANNA_CONF_FILE DEFAULT os_admin_username savanna + iniset $SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME + iniset $SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database connection `database_connection_url savanna` + iniset $SAVANNA_CONF_FILE database connection `database_connection_url savanna` if is_service_enabled neutron; then - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_neutron true - 
iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_floating_ips true + iniset $SAVANNA_CONF_FILE DEFAULT use_neutron true + iniset $SAVANNA_CONF_FILE DEFAULT use_floating_ips true fi - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG + iniset $SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG recreate_database savanna utf8 - $SAVANNA_BIN_DIR/savanna-db-manage --config-file $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE upgrade head + $SAVANNA_BIN_DIR/savanna-db-manage --config-file $SAVANNA_CONF_FILE upgrade head } # install_savanna() - Collect source and prepare @@ -116,7 +116,7 @@ function install_savanna() { # start_savanna() - Start running processes, including screen function start_savanna() { - screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api --config-file $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE" + screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api --config-file $SAVANNA_CONF_FILE" } # stop_savanna() - Stop running processes From 1f76328027bb5cee0b0ea7077f4c59c919f1c4ae Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Tue, 28 Jan 2014 23:01:38 +0100 Subject: [PATCH 0396/4438] Stop all neutron-ns-metadata-proxy with stop_neutron Process name is actually python therefore neutron-ns-metadata-proxy pattern didn't match wanted process. Closes-bug: #1269982 Change-Id: Ib4439b0d32f103253b461841fa903c65763ff280 --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 960f11b154..f9ee484607 100644 --- a/lib/neutron +++ b/lib/neutron @@ -505,7 +505,7 @@ function stop_neutron() { [ ! 
-z "$pid" ] && sudo kill -9 $pid fi if is_service_enabled q-meta; then - sudo pkill -9 neutron-ns-metadata-proxy || : + sudo pkill -9 -f neutron-ns-metadata-proxy || : fi if is_service_enabled q-lbaas; then From 4a0cd374e2911adb33af44fa6643d6323ea523e6 Mon Sep 17 00:00:00 2001 From: shalini khandelwal Date: Wed, 29 Jan 2014 09:48:15 +0000 Subject: [PATCH 0397/4438] Renamed file 70-trove to 70-trove.sh Reason: Devstack not installing trove stack.sh ignores the trove installation script(70-trove) Change-Id: I3f179a6b5ded46e9f96a1c4bcc673ec52fa8bf0e Closes-Bug: #1274022 --- extras.d/{70-trove => 70-trove.sh} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename extras.d/{70-trove => 70-trove.sh} (100%) diff --git a/extras.d/70-trove b/extras.d/70-trove.sh similarity index 100% rename from extras.d/70-trove rename to extras.d/70-trove.sh From f2c1a712e82ac1d347b0fb6526c79471a9ef8d55 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 29 Jan 2014 21:38:14 +0000 Subject: [PATCH 0398/4438] Copy container-sync-realms.conf in /etc/swift We need the new container-sync realms configuration or we will get a nasty harmless error opening file at swift proxy startup. 
Change-Id: If939da305dcb9403c418219032ac6b50b0099bd3 Closes-Bug: 1274295 --- lib/swift | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/swift b/lib/swift index 37b630c3fa..baa03ec5b8 100644 --- a/lib/swift +++ b/lib/swift @@ -258,6 +258,8 @@ function configure_swift() { SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONF_DIR}/proxy-server.conf cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER} + cp ${SWIFT_DIR}/etc/container-sync-realms.conf-sample ${SWIFT_CONF_DIR}/container-sync-realms.conf + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${STACK_USER} From 4237f590b7b93117e59f9f777bc70d212969f61a Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 29 Jan 2014 16:22:11 -0600 Subject: [PATCH 0399/4438] Generate Tempest service list rather than hard-code it The list of services that Tempest used to set its 'service_available' config values was hard-coded. To be plugin-friendly have each service (project) add its name to the TEMPEST_SERVICES variable and use that for setting the 'service_avilable' values. 
Change-Id: I208efd7fd0798b18ac2e6353ee70b773e84a2683 --- lib/ceilometer | 5 ++++- lib/cinder | 3 +++ lib/glance | 3 +++ lib/heat | 4 ++++ lib/horizon | 3 +++ lib/ironic | 3 +++ lib/marconi | 4 ++++ lib/neutron | 4 ++++ lib/nova | 3 +++ lib/savanna | 4 ++++ lib/swift | 3 +++ lib/tempest | 2 +- lib/trove | 4 ++++ stackrc | 6 ++++++ 14 files changed, 49 insertions(+), 2 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 6f3896f2d4..30bf3aed50 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -52,7 +52,10 @@ CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql} CEILOMETER_SERVICE_PROTOCOL=http CEILOMETER_SERVICE_HOST=$SERVICE_HOST CEILOMETER_SERVICE_PORT=${CEILOMETER_SERVICE_PORT:-8777} -# + +# Tell Tempest this project is present +TEMPEST_SERVICES+=,ceilometer + # Functions # --------- diff --git a/lib/cinder b/lib/cinder index d76a41d4b8..9f70b2a0c9 100644 --- a/lib/cinder +++ b/lib/cinder @@ -79,6 +79,9 @@ VOLUME_BACKING_DEVICE2=${VOLUME_BACKING_DEVICE2:-} VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,cinder + # Functions # --------- diff --git a/lib/glance b/lib/glance index 55d5fb37ec..2d41ea4653 100644 --- a/lib/glance +++ b/lib/glance @@ -52,6 +52,9 @@ fi # Glance connection info. Note the port must be specified. GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,glance + # Functions # --------- diff --git a/lib/heat b/lib/heat index b9b8aa66ca..467619f3c6 100644 --- a/lib/heat +++ b/lib/heat @@ -38,6 +38,10 @@ HEAT_CONF=$HEAT_CONF_DIR/heat.conf HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates +# Tell Tempest this project is present +TEMPEST_SERVICES+=,heat + + # Functions # --------- diff --git a/lib/horizon b/lib/horizon index 5bff712743..c64d8502ba 100644 --- a/lib/horizon +++ b/lib/horizon @@ -31,6 +31,9 @@ HORIZON_DIR=$DEST/horizon # The example file in Horizon repo is used by default. 
HORIZON_SETTINGS=${HORIZON_SETTINGS:-$HORIZON_DIR/openstack_dashboard/local/local_settings.py.example} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,horizon + # Functions # --------- diff --git a/lib/ironic b/lib/ironic index afbc3e09e4..b8838f59fb 100644 --- a/lib/ironic +++ b/lib/ironic @@ -42,6 +42,9 @@ IRONIC_BIN_DIR=$(get_python_exec_prefix) IRONIC_SERVICE_PROTOCOL=http IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST:6385} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,ironic + # Functions # --------- diff --git a/lib/marconi b/lib/marconi index 6b9ffdc0b3..1eaebbdf16 100644 --- a/lib/marconi +++ b/lib/marconi @@ -51,6 +51,10 @@ MARCONI_BRANCH=${MARCONI_BRANCH:-master} MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconiclient.git} MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,marconi + + # Functions # --------- diff --git a/lib/neutron b/lib/neutron index 960f11b154..68dfd4a6a3 100644 --- a/lib/neutron +++ b/lib/neutron @@ -237,6 +237,10 @@ else Q_USE_SECGROUP=False fi +# Tell Tempest this project is present +TEMPEST_SERVICES+=,neutron + + # Functions # --------- diff --git a/lib/nova b/lib/nova index dbaa3f53d9..9db19ed532 100644 --- a/lib/nova +++ b/lib/nova @@ -122,6 +122,9 @@ MULTI_HOST=`trueorfalse False $MULTI_HOST` TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,nova + # Functions # --------- diff --git a/lib/savanna b/lib/savanna index c7d59f79c4..176f290c35 100644 --- a/lib/savanna +++ b/lib/savanna @@ -40,6 +40,10 @@ else SAVANNA_BIN_DIR=$(get_python_exec_prefix) fi +# Tell Tempest this project is present +TEMPEST_SERVICES+=,savanna + + # Functions # --------- diff --git a/lib/swift b/lib/swift index 37b630c3fa..afdf995d2e 100644 --- a/lib/swift +++ b/lib/swift @@ -111,6 +111,9 @@ 
OBJECT_PORT_BASE=${OBJECT_PORT_BASE:-6013} CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6011} ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,swift + # Functions # --------- diff --git a/lib/tempest b/lib/tempest index ef9dfe218b..ee996657c2 100644 --- a/lib/tempest +++ b/lib/tempest @@ -329,7 +329,7 @@ function configure_tempest() { iniset $TEMPEST_CONF network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}" # service_available - for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna trove marconi; do + for service in ${TEMPEST_SERVICES//,/ }; do if is_service_enabled $service ; then iniset $TEMPEST_CONF service_available $service "True" else diff --git a/lib/trove b/lib/trove index 9c91024211..1fd011a530 100644 --- a/lib/trove +++ b/lib/trove @@ -38,6 +38,10 @@ else TROVE_BIN_DIR=$(get_python_exec_prefix) fi +# Tell Tempest this project is present +TEMPEST_SERVICES+=,trove + + # Functions # --------- diff --git a/stackrc b/stackrc index 8a0280ecfa..197b4cfc46 100644 --- a/stackrc +++ b/stackrc @@ -37,6 +37,12 @@ fi # enable_service tempest ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,mysql +# Tell Tempest which services are available. The default is set here as +# Tempest falls late in the configuration sequence. This differs from +# ``ENABLED_SERVICES`` in that the project names are used here rather than +# the service names, i.e.: TEMPEST_SERVICES="key,glance,nova" +TEMPEST_SERVICES="" + # Set the default Nova APIs to enable NOVA_ENABLED_APIS=ec2,osapi_compute,metadata From 6c57fbab26e40af5c5b19b46fb3da39341f34dab Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Wed, 20 Nov 2013 17:00:21 -0800 Subject: [PATCH 0400/4438] Set keystone admin_bind_host to KEYSTONE_SERVICE_HOST On Linux ports 32768-61000 can be used by just about anything needing a socket. 
Keystone's IANA assigned port is 35357. Occasionally something else will be using port 35357 first because Linux allows this. Workaround is to bind to port 127.0.0.1 instead of 0.0.0.0. $KEYSTONE_SERVICE_HOST gets its value from $SERVICE_HOST which is set to 127.0.0.1 in the gate. "Ephemeral (client) ports will *never* be sourced from 0.0.0.0, and are uniquely identified by the full connection five-tuple (proto, src IP, src port, dst IP, dst port) anyway, allowing them to overlap src IP/src port as long as proto/dst IP/dst port are different. Thus it is up to keystone/devstack to bind more appropriately and not use wildcard bind addresses unless explicitly necessary for some reason. For example, in the log output, the URLs are configured with dst IPs of 127.0.0.1 anyway, so binding explicitly to localhost would change nothing, while skirting this particular edge case nicely." ~Evan Callicoat This doesn't fix bug 1253482 it works around it while a better solution is prepared (running keystone behind apache in devstack). 
Co-Authored-By: Joe Gordon Change-Id: I112309661dadf8b753c3311182f82464d9d3595e Related-bug: #1253482 --- lib/keystone | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/keystone b/lib/keystone index 0850fb219e..4f7f68b57f 100644 --- a/lib/keystone +++ b/lib/keystone @@ -178,6 +178,7 @@ function configure_keystone() { # Set the URL advertised in the ``versions`` structure returned by the '/' route iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(public_port)s/" iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(admin_port)s/" + iniset $KEYSTONE_CONF DEFAULT admin_bind_host "$KEYSTONE_SERVICE_HOST" # Register SSL certificates if provided if is_ssl_enabled_service key; then From ec5918f2f6ee54c3384e85866e98b67ef01e1e1e Mon Sep 17 00:00:00 2001 From: Ben Nemec Date: Thu, 30 Jan 2014 16:07:23 +0000 Subject: [PATCH 0401/4438] Retry rabbitmq password change Due to the bug referenced below, on Fedora it is possible for the rabbitmq password change to fail the first time rabbitmq is started. This change adds a retry loop to avoid the problem in devstack. One retry should be enough in most (all?) cases, but this will retry up to ten times just to be safe. Note that just retrying the password change is not enough. The rabbitmq-server service must be restarted as well. 
Change-Id: I403dcd503aa8e74e2ba6312a0decf0d4fd0d8795 bz: https://bugzilla.redhat.com/show_bug.cgi?id=1059028 --- lib/rpc_backend | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index f59c80096f..3651bc0d20 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -139,12 +139,18 @@ function restart_rpc_backend() { if is_service_enabled rabbit; then # Start rabbitmq-server echo_summary "Starting RabbitMQ" - if is_fedora || is_suse; then - # service is not started by default - restart_service rabbitmq-server - fi - # change the rabbit password since the default is "guest" - sudo rabbitmqctl change_password guest $RABBIT_PASSWORD + # NOTE(bnemec): Retry initial rabbitmq configuration to deal with + # the fact that sometimes it fails to start properly. + # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1059028 + for i in `seq 10`; do + if is_fedora || is_suse; then + # service is not started by default + restart_service rabbitmq-server + fi + # change the rabbit password since the default is "guest" + sudo rabbitmqctl change_password guest $RABBIT_PASSWORD && break + [[ $i -eq "10" ]] && die $LINENO "Failed to set rabbitmq password" + done if is_service_enabled n-cell; then # Add partitioned access for the child cell if [ -z `sudo rabbitmqctl list_vhosts | grep child_cell` ]; then From f84eb5ba43ec0d548e59d982ec149a8feaa4d4d0 Mon Sep 17 00:00:00 2001 From: Don Dugger Date: Thu, 30 Jan 2014 09:59:30 -0700 Subject: [PATCH 0402/4438] Add support for Gantt Gantt is the new breakout of the scheduler code from the Nova source tree. These changes allow devstack to install/configure/startup gantt as the scheduler service for openstack. 
Change-Id: Ia2b6001f5ccf2469ee9fdee67564c9a915a13862 --- extras.d/70-gantt.sh | 31 ++++++++++++++ lib/gantt | 96 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 127 insertions(+) create mode 100644 extras.d/70-gantt.sh create mode 100644 lib/gantt diff --git a/extras.d/70-gantt.sh b/extras.d/70-gantt.sh new file mode 100644 index 0000000000..ac1efba748 --- /dev/null +++ b/extras.d/70-gantt.sh @@ -0,0 +1,31 @@ +# gantt.sh - Devstack extras script to install Gantt + +if is_service_enabled n-sch; then + disable_service gantt +fi + +if is_service_enabled gantt; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/gantt + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing Gantt" + install_gantt + cleanup_gantt + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + echo_summary "Configuring Gantt" + configure_gantt + + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + # Initialize gantt + init_gantt + + # Start gantt + echo_summary "Starting Gantt" + start_gantt + fi + + if [[ "$1" == "unstack" ]]; then + stop_gantt + fi +fi diff --git a/lib/gantt b/lib/gantt new file mode 100644 index 0000000000..832d7590df --- /dev/null +++ b/lib/gantt @@ -0,0 +1,96 @@ +# lib/gantt +# Install and start **Gantt** scheduler service + +# Dependencies: +# +# - functions +# - DEST, DATA_DIR, STACK_USER must be defined + +# stack.sh +# --------- +# - install_gantt +# - configure_gantt +# - init_gantt +# - start_gantt +# - stop_gantt +# - cleanup_gantt + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# -------- + +# set up default directories +GANTT_DIR=$DEST/gantt +GANTT_STATE_PATH=${GANTT_STATE_PATH:=$DATA_DIR/gantt} +GANTT_REPO=${GANTT_REPO:-${GIT_BASE}/openstack/gantt.git} +GANTT_BRANCH=${GANTT_BRANCH:-master} + +GANTTCLIENT_DIR=$DEST/python-ganttclient +GANTTCLIENT_REPO=${GANTT_REPO:-${GIT_BASE}/openstack/python-ganttclient.git} 
+GANTTCLIENT_BRANCH=${GANTT_BRANCH:-master} + +# eventually we will have a separate gantt config +# file but for compatibility reasone stick with +# nova.conf for now +GANTT_CONF_DIR=${GANTT_CONF_DIR:-/etc/nova} +GANTT_CONF=$GANTT_CONF_DIR/nova.conf + +# Support entry points installation of console scripts +GANTT_BIN_DIR=$(get_python_exec_prefix) + + +# Functions +# --------- + +# cleanup_gantt() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_gantt() { + echo "Cleanup Gantt" +} + +# configure_gantt() - Set config files, create data dirs, etc +function configure_gantt() { + echo "Configure Gantt" +} + +# init_gantt() - Initialize database and volume group +function init_gantt() { + echo "Initialize Gantt" +} + +# install_gantt() - Collect source and prepare +function install_gantt() { + git_clone $GANTT_REPO $GANTT_DIR $GANTT_BRANCH + setup_develop $GANTT_DIR +} + +# install_ganttclient() - Collect source and prepare +function install_ganttclient() { + echo "Install Gantt Client" +# git_clone $GANTTCLIENT_REPO $GANTTCLIENT_DIR $GANTTCLIENT_BRANCH +# setup_develop $GANTTCLIENT_DIR +} + +# start_gantt() - Start running processes, including screen +function start_gantt() { + if is_service_enabled gantt; then + screen_it gantt "cd $GANTT_DIR && $GANTT_BIN_DIR/gantt-scheduler --config-file $GANTT_CONF" + fi +} + +# stop_gantt() - Stop running processes +function stop_gantt() { + echo "Stop Gantt" + screen_stop gantt +} + +# Restore xtrace +$XTRACE + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: From 2dac885e6c48989d9a7bc89aca2b69503d2b3399 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Fri, 31 Jan 2014 01:25:28 -0500 Subject: [PATCH 0403/4438] Pull docker images from global registry The global docker registry is where images are being built and uploaded. It's effectively docker's version of, say, 'pip'. 
The static tarballs are not only an extra maintenance burden as they're outside the standard build and publishing process, but are presently outside the scope of an open development / release process as well. While this process does cause some trouble with network-independence for CI purposes, the fetching is still done from install-docker.sh; Additionally, this driver is not currently tested via the community CI effort. Change-Id: I3ee6bfee9c273cd3aabe1e00a1d1a8856a466189 --- lib/nova_plugins/hypervisor-docker | 8 ++++---- tools/docker/install_docker.sh | 23 ++++++----------------- 2 files changed, 10 insertions(+), 21 deletions(-) diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index 0153953d6c..bb934b87d6 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -31,10 +31,10 @@ DOCKER_UNIX_SOCKET=/var/run/docker.sock DOCKER_PID_FILE=/var/run/docker.pid DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042} -DOCKER_IMAGE=${DOCKER_IMAGE:-http://get.docker.io/images/openstack/docker-ut.tar.gz} -DOCKER_IMAGE_NAME=docker-busybox -DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-http://get.docker.io/images/openstack/docker-registry.tar.gz} -DOCKER_REGISTRY_IMAGE_NAME=docker-registry +DOCKER_IMAGE=${DOCKER_IMAGE:-busybox:latest} +DOCKER_IMAGE_NAME=busybox +DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-registry:latest} +DOCKER_REGISTRY_IMAGE_NAME=registry DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME} DOCKER_APT_REPO=${DOCKER_APT_REPO:-https://get.docker.io/ubuntu} diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh index 375cfe958b..4fa23864fb 100755 --- a/tools/docker/install_docker.sh +++ b/tools/docker/install_docker.sh @@ -55,21 +55,10 @@ if ! 
timeout $SERVICE_TIMEOUT sh -c "$CONFIGURE_CMD"; then die $LINENO "docker did not start" fi +# Get guest container image +docker pull $DOCKER_IMAGE +docker tag $DOCKER_IMAGE $DOCKER_IMAGE_NAME -# Get Docker image -if [[ ! -r $FILES/docker-ut.tar.gz ]]; then - (cd $FILES; curl -OR $DOCKER_IMAGE) -fi -if [[ ! -r $FILES/docker-ut.tar.gz ]]; then - die $LINENO "Docker image unavailable" -fi -docker import - $DOCKER_IMAGE_NAME <$FILES/docker-ut.tar.gz - -# Get Docker registry image -if [[ ! -r $FILES/docker-registry.tar.gz ]]; then - (cd $FILES; curl -OR $DOCKER_REGISTRY_IMAGE) -fi -if [[ ! -r $FILES/docker-registry.tar.gz ]]; then - die $LINENO "Docker registry image unavailable" -fi -docker import - $DOCKER_REGISTRY_IMAGE_NAME <$FILES/docker-registry.tar.gz +# Get docker-registry image +docker pull $REGISTRY_IMAGE +docker tag $REGISTRY_IMAGE $REGISTRY_IMAGE_NAME From 19a3814b9a3afc24a77c5c301622661f388475d5 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 30 Jan 2014 15:49:53 +0100 Subject: [PATCH 0404/4438] glance: stop using deprecated notifier_strategy Change-Id: Ic796f0ad57db45bf053312ad10815461528030b3 --- lib/glance | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/glance b/lib/glance index 2d41ea4653..07c4408efc 100644 --- a/lib/glance +++ b/lib/glance @@ -108,10 +108,8 @@ function configure_glance() { iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $GLANCE_API_CONF keystone_authtoken admin_user glance iniset $GLANCE_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - if is_service_enabled qpid; then - iniset $GLANCE_API_CONF DEFAULT notifier_strategy qpid - elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $GLANCE_API_CONF DEFAULT notifier_strategy rabbit + if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then + iniset $GLANCE_API_CONF DEFAULT notification_driver messaging fi iniset_rpc_backend glance $GLANCE_API_CONF 
DEFAULT iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api From 061c14da01bb25ff86e0bfdb5e1bed887cb63997 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 30 Jan 2014 15:51:37 +0100 Subject: [PATCH 0405/4438] ironic: remove notifier_strategy option This has never exited in Ironic, and it does not even uses notification. Change-Id: I4a3d386116561d9a22d650f123df1aae5ed9849e --- lib/ironic | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/ironic b/lib/ironic index b8838f59fb..983add83d1 100644 --- a/lib/ironic +++ b/lib/ironic @@ -105,11 +105,6 @@ function configure_ironic_api() { iniset $IRONIC_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $IRONIC_CONF_FILE keystone_authtoken admin_user ironic iniset $IRONIC_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD - if is_service_enabled qpid; then - iniset $IRONIC_CONF_FILE DEFAULT notifier_strategy qpid - elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $IRONIC_CONF_FILE DEFAULT notifier_strategy rabbit - fi iniset_rpc_backend ironic $IRONIC_CONF_FILE DEFAULT iniset $IRONIC_CONF_FILE keystone_authtoken signing_dir $IRONIC_AUTH_CACHE_DIR/api From 6114a518de8d2db560db193ed4bc26d6e1659ce7 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 31 Jan 2014 08:21:24 -0500 Subject: [PATCH 0406/4438] fix sar reporting in the gate the sar filter made an assumption of time display including an AM/PM... which isn't true in all environments. Hence the blank sysstat screen in the gate runs of late. This fixes that, and displays the first line which includes header version to make sure we are functioning. 
Change-Id: I537e0bf2127efaf337c4792bc23d938145c8990d --- tools/sar_filter.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/sar_filter.py b/tools/sar_filter.py index ed8c19687c..24ef0e476c 100755 --- a/tools/sar_filter.py +++ b/tools/sar_filter.py @@ -25,10 +25,10 @@ def is_data_line(line): def parse_line(line): - m = re.search('(\d\d:\d\d:\d\d \w\w)(\s+((\S+)\s*)+)', line) + m = re.search('(\d\d:\d\d:\d\d( \w\w)?)(\s+((\S+)\s*)+)', line) if m: date = m.group(1) - data = m.group(2).rstrip() + data = m.group(3).rstrip() return date, data else: return None, None @@ -47,6 +47,10 @@ def parse_line(line): data_line = "" printed_header = False current_ts = None + +# print out the first sysstat line regardless +print process.stdout.readline() + while True: nextline = process.stdout.readline() if nextline == '' and process.poll() is not None: From 43d950843769135d32ce316cfb0f72697a879623 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Thu, 30 Jan 2014 17:49:22 -0500 Subject: [PATCH 0407/4438] Install libguestfs for nova-compute on Ubuntu We were already installing this for n-cpu on rpm distros, but not Ubuntu. Install it so that nova-compute can use it for file injection, which is the preferred method over nbd. Set CONF.libvirt.inject_partition to -1. This enables using libguestfs to determine the proper partition to inject into. Don't bother trying to load the nbd kernel module anymore. It won't be used since we know always expect libguestfs to be installed. 
Change-Id: Ifa9d95bf759f1dad8685590a2df242d852dd2cb0 --- files/apts/n-cpu | 2 +- lib/nova | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/files/apts/n-cpu b/files/apts/n-cpu index 29e37603b7..b287107256 100644 --- a/files/apts/n-cpu +++ b/files/apts/n-cpu @@ -1,8 +1,8 @@ # Stuff for diablo volumes -nbd-client lvm2 open-iscsi open-iscsi-utils # Deprecated since quantal dist:precise genisoimage sysfsutils sg3-utils +python-guestfs diff --git a/lib/nova b/lib/nova index 9db19ed532..d5f7514be5 100644 --- a/lib/nova +++ b/lib/nova @@ -240,8 +240,10 @@ function configure_nova() { sudo sysctl -w net.ipv4.ip_forward=1 if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - # Attempt to load modules: network block device - used to manage qcow images - sudo modprobe nbd || true + # When libguestfs is available for file injection, enable using + # libguestfs to inspect the image and figure out the proper + # partition to inject into. + iniset $NOVA_CONF libvirt inject_partition '-1' # Check for kvm (hardware based virtualization). If unable to initialize # kvm, we drop back to the slower emulation mode (qemu). Note: many systems From c4f47345a588b15d83ebc5584c8698843b568a40 Mon Sep 17 00:00:00 2001 From: Jeremy Stanley Date: Sat, 25 Jan 2014 01:10:31 +0000 Subject: [PATCH 0408/4438] Make MySQL query logging optional * lib/databases/mysql: Wrap query log configuration in a check for a ENABLE_QUERY_LOGGING variable. * stackrc: Add the DATABASE_QUERY_LOGGING variable defaulted to True. 
Change-Id: Iddf8538ad0a1e36e2c6944dc70315984026c8245 --- lib/databases/mysql | 33 +++++++++++++++++++-------------- stackrc | 3 +++ 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 0eb8fdd7a2..476b4b91b7 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -87,20 +87,25 @@ function configure_database_mysql { default-storage-engine = InnoDB" $MY_CONF fi - # Turn on slow query log - sudo sed -i '/log.slow.queries/d' $MY_CONF - sudo sed -i -e "/^\[mysqld\]/ a \ -log-slow-queries = /var/log/mysql/mysql-slow.log" $MY_CONF - - # Log all queries (any query taking longer than 0 seconds) - sudo sed -i '/long.query.time/d' $MY_CONF - sudo sed -i -e "/^\[mysqld\]/ a \ -long-query-time = 0" $MY_CONF - - # Log all non-indexed queries - sudo sed -i '/log.queries.not.using.indexes/d' $MY_CONF - sudo sed -i -e "/^\[mysqld\]/ a \ -log-queries-not-using-indexes" $MY_CONF + if [[ "$DATABASE_QUERY_LOGGING" == "True" ]]; then + echo_summary "Enabling MySQL query logging" + + # Turn on slow query log + sudo sed -i '/log.slow.queries/d' $MY_CONF + sudo sed -i -e "/^\[mysqld\]/ a \ + log-slow-queries = /var/log/mysql/mysql-slow.log" $MY_CONF + + # Log all queries (any query taking longer than 0 seconds) + sudo sed -i '/long.query.time/d' $MY_CONF + sudo sed -i -e "/^\[mysqld\]/ a \ + long-query-time = 0" $MY_CONF + + # Log all non-indexed queries + sudo sed -i '/log.queries.not.using.indexes/d' $MY_CONF + sudo sed -i -e "/^\[mysqld\]/ a \ + log-queries-not-using-indexes" $MY_CONF + + fi restart_service $MYSQL } diff --git a/stackrc b/stackrc index 49fb26b2c7..276ce33970 100644 --- a/stackrc +++ b/stackrc @@ -59,6 +59,9 @@ elif [[ -f $RC_DIR/.localrc.auto ]]; then source $RC_DIR/.localrc.auto fi +# This can be used to turn database query logging on and off +# (currently only implemented for MySQL backend) +DATABASE_QUERY_LOGGING=$(trueorfalse True $DATABASE_QUERY_LOGGING) # Repositories # ------------ From 
1272bc5e93f171c8d7193475547c43b9032b5c39 Mon Sep 17 00:00:00 2001 From: Nikhil Manchanda Date: Fri, 31 Jan 2014 15:04:05 -0800 Subject: [PATCH 0409/4438] Pipeline filter is 'authtoken' and not 'tokenauth' The pipeline fileter in the api-paste.ini for the keystone middleware was renamed to 'authtoken'. Trove install is not able to authenticate against keystone unless this is renamed Change-Id: I6f912d29c143b3acbc43da222cf8b4c3fafb2c8d --- lib/trove | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/trove b/lib/trove index 1fd011a530..2000446b13 100644 --- a/lib/trove +++ b/lib/trove @@ -129,14 +129,14 @@ function configure_trove() { # Copy api-paste file over to the trove conf dir and configure it cp $TROVE_LOCAL_CONF_DIR/api-paste.ini $TROVE_CONF_DIR/api-paste.ini TROVE_API_PASTE_INI=$TROVE_CONF_DIR/api-paste.ini - iniset $TROVE_API_PASTE_INI filter:tokenauth auth_host $KEYSTONE_AUTH_HOST - iniset $TROVE_API_PASTE_INI filter:tokenauth auth_port $KEYSTONE_AUTH_PORT - iniset $TROVE_API_PASTE_INI filter:tokenauth auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $TROVE_API_PASTE_INI filter:tokenauth cafile $KEYSTONE_SSL_CA - iniset $TROVE_API_PASTE_INI filter:tokenauth admin_tenant_name $SERVICE_TENANT_NAME - iniset $TROVE_API_PASTE_INI filter:tokenauth admin_user trove - iniset $TROVE_API_PASTE_INI filter:tokenauth admin_password $SERVICE_PASSWORD - iniset $TROVE_API_PASTE_INI filter:tokenauth signing_dir $TROVE_AUTH_CACHE_DIR + iniset $TROVE_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $TROVE_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $TROVE_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $TROVE_API_PASTE_INI filter:authtoken cafile $KEYSTONE_SSL_CA + iniset $TROVE_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $TROVE_API_PASTE_INI filter:authtoken admin_user trove + iniset $TROVE_API_PASTE_INI filter:authtoken 
admin_password $SERVICE_PASSWORD + iniset $TROVE_API_PASTE_INI filter:authtoken signing_dir $TROVE_AUTH_CACHE_DIR # (Re)create trove conf files rm -f $TROVE_CONF_DIR/trove.conf From 7bc783b95b2e115f40a4db8823823573afe7a768 Mon Sep 17 00:00:00 2001 From: Nathan Kinder Date: Fri, 31 Jan 2014 16:54:10 -0800 Subject: [PATCH 0410/4438] LDAP root DN creation fails When keystone is configured to set up an LDAP server to use as it's identity backend, the creation of the root DN fails. The problem is that one of the mods in the modify operation that sets up the root DN is incorrect, which causes the entire modify operation to fail. The incorrect mod is attempting to configure some attribute indexes, but one of the attributes it specifies is undefined. This patch removes the undefined attribute from the template that is used to create the modify operation. Change-Id: I413587130c64ca4f5f467b2ea1c0ab12867999ce Closes-Bug: 1275158 --- files/ldap/manager.ldif.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in index de3b69de7c..2f1f1395ee 100644 --- a/files/ldap/manager.ldif.in +++ b/files/ldap/manager.ldif.in @@ -12,4 +12,4 @@ olcRootPW: ${SLAPPASS} replace: olcDbIndex olcDbIndex: objectClass eq olcDbIndex: default pres,eq -olcDbIndex: cn,sn,givenName,co +olcDbIndex: cn,sn,givenName From 6bf1f1fb332c93cb4b74cf6b6511d2f9818a501d Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Sat, 1 Feb 2014 17:05:18 -0500 Subject: [PATCH 0411/4438] use ext4 for guest default ephemeral this isn't upstream default because of compatibility questions with really old host on providers. However there is no reason not to do it in devstack. 
Change-Id: I6438c0efb297cfa5d3dbb5f00701b24f01c39d14 --- lib/nova_plugins/hypervisor-libvirt | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 6f90f4ac17..42d3af15cf 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -108,6 +108,7 @@ EOF" iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" iniset $NOVA_CONF DEFAULT use_usb_tablet "False" + iniset $NOVA_CONF DEFAULT default_ephemeral_format "ext4" iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver" LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" From 2ed4ae70b820ad3cbd12f2b6c2452ff66005ebaa Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 2 Feb 2014 09:38:05 +0100 Subject: [PATCH 0412/4438] Have ceilometer to respect the keystone settings lib/ceilometer ignored the global settings related to keystone settings. It can cause issues for example when the keystone does not listen on 127.0.0.1 even in single node deployment. 
Change-Id: I6e4654daa2ec624ac11aaf7f49495fcfaa72071d --- lib/ceilometer | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ceilometer b/lib/ceilometer index 30bf3aed50..75c00b6b07 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -137,7 +137,9 @@ function configure_ceilometer() { iniset $CEILOMETER_CONF DEFAULT os_password $SERVICE_PASSWORD iniset $CEILOMETER_CONF DEFAULT os_tenant_name $SERVICE_TENANT_NAME - iniset $CEILOMETER_CONF keystone_authtoken auth_protocol http + iniset $CEILOMETER_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $CEILOMETER_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $CEILOMETER_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL iniset $CEILOMETER_CONF keystone_authtoken admin_user ceilometer iniset $CEILOMETER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME From 85a85f87f814446dd2364eea1b6d976d50500203 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Tue, 21 Jan 2014 11:13:55 +0100 Subject: [PATCH 0413/4438] Use service role with glance service glance just used to admin role for token validation, the service role is sufficient for this. glance also needs an user with enough permission to use swift, so creating a dedictated service user for swift usage when s-proxy is enabled. 
Change-Id: I6df3905e5db35ea3421468ca1ee6d8de3271f8d1 --- files/keystone_data.sh | 24 +++++++++++++++++++----- lib/glance | 2 +- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index d477c42906..9a34c7616f 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -2,12 +2,14 @@ # # Initial data for Keystone using python-keystoneclient # -# Tenant User Roles +# Tenant User Roles # ------------------------------------------------------------------ -# service glance admin -# service heat service # if enabled +# service glance service +# service glance-swift ResellerAdmin +# service heat service # if enabled +# service ceilometer admin # if enabled # Tempest Only: -# alt_demo alt_demo Member +# alt_demo alt_demo Member # # Variables set before calling this script: # SERVICE_TOKEN - aka admin_token in keystone.conf @@ -96,7 +98,19 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then keystone user-role-add \ --tenant $SERVICE_TENANT_NAME \ --user glance \ - --role admin + --role service + # required for swift access + if [[ "$ENABLED_SERVICES" =~ "s-proxy" ]]; then + keystone user-create \ + --name=glance-swift \ + --pass="$SERVICE_PASSWORD" \ + --tenant $SERVICE_TENANT_NAME \ + --email=glance-swift@example.com + keystone user-role-add \ + --tenant $SERVICE_TENANT_NAME \ + --user glance-swift \ + --role ResellerAdmin + fi if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then keystone service-create \ --name=glance \ diff --git a/lib/glance b/lib/glance index 2d41ea4653..00f499a0b9 100644 --- a/lib/glance +++ b/lib/glance @@ -124,7 +124,7 @@ function configure_glance() { if is_service_enabled s-proxy; then iniset $GLANCE_API_CONF DEFAULT default_store swift iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ - iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance + iniset $GLANCE_API_CONF DEFAULT 
swift_store_user $SERVICE_TENANT_NAME:glance-swift iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True From 8664ca53f80849553043aba9663f7cb72a9cec42 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 2 Feb 2014 10:07:39 +0100 Subject: [PATCH 0414/4438] bash_completion for heat and ceilometer Installing bash completion for heat and ceilometer by using a similar way used with other services. Change-Id: I5094648272f2666f6bff181bfa3aeb35e863bd97 --- lib/ceilometer | 1 + lib/heat | 1 + 2 files changed, 2 insertions(+) diff --git a/lib/ceilometer b/lib/ceilometer index 30bf3aed50..6a72459d41 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -105,6 +105,7 @@ function cleanup_ceilometer() { # configure_ceilometerclient() - Set config files, create data dirs, etc function configure_ceilometerclient() { setup_develop $CEILOMETERCLIENT_DIR + sudo install -D -m 0644 -o $STACK_USER {$CEILOMETERCLIENT_DIR/tools/,/etc/bash_completion.d/}ceilometer.bash_completion } # configure_ceilometer() - Set config files, create data dirs, etc diff --git a/lib/heat b/lib/heat index 467619f3c6..f171cb450c 100644 --- a/lib/heat +++ b/lib/heat @@ -157,6 +157,7 @@ function create_heat_cache_dir() { function install_heatclient() { git_clone $HEATCLIENT_REPO $HEATCLIENT_DIR $HEATCLIENT_BRANCH setup_develop $HEATCLIENT_DIR + sudo install -D -m 0644 -o $STACK_USER {$HEATCLIENT_DIR/tools/,/etc/bash_completion.d/}heat.bash_completion } # install_heat() - Collect source and prepare From 0af8122834917b4e44ee0cfae22eb5f93472f1a6 Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Sun, 2 Feb 2014 09:59:07 +1300 Subject: [PATCH 0415/4438] Disable file injection for libvirt driver Change-Id: I73289195d3bb455f4076fadd2eadd6036b04b722 --- lib/nova | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/lib/nova b/lib/nova index d5f7514be5..0db242a34a 100644 --- a/lib/nova +++ b/lib/nova @@ -240,10 
+240,9 @@ function configure_nova() { sudo sysctl -w net.ipv4.ip_forward=1 if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - # When libguestfs is available for file injection, enable using - # libguestfs to inspect the image and figure out the proper - # partition to inject into. - iniset $NOVA_CONF libvirt inject_partition '-1' + # File injection is being disabled by default in the near future - + # disable it here for now to avoid surprises later. + iniset $NOVA_CONF libvirt inject_partition '-2' # Check for kvm (hardware based virtualization). If unable to initialize # kvm, we drop back to the slower emulation mode (qemu). Note: many systems From 0d4bd7e6104bee974a544422456d731eb664805c Mon Sep 17 00:00:00 2001 From: Anita Kuno Date: Sun, 2 Feb 2014 14:59:39 -0600 Subject: [PATCH 0416/4438] Silence commands to echo copyright notices This patch silences commands that echoed copyright notices to the devstack logs. The copyright notices are moved to the top of the file as comments. Change-Id: I8d474a366af2954c168ba8d07329392f56e8e75a --- exercises/neutron-adv-test.sh | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index 1343f11553..a9199e62a6 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -1,6 +1,11 @@ #!/usr/bin/env bash # - +# Copyright 2012, Cisco Systems +# Copyright 2012, VMware, Inc. +# Copyright 2012, NTT MCL, Inc. +# +# Please direct any questions to dedutta@cisco.com, dwendlandt@vmware.com, nachi@nttmcl.com +# # **neutron-adv-test.sh** # Perform integration testing of Nova and other components with Neutron. @@ -406,14 +411,6 @@ usage() { main() { echo Description - echo - echo Copyright 2012, Cisco Systems - echo Copyright 2012, VMware, Inc. - echo Copyright 2012, NTT MCL, Inc. 
- echo - echo Please direct any questions to dedutta@cisco.com, dwendlandt@vmware.com, nachi@nttmcl.com - echo - if [ $# -eq 0 ] ; then # if no args are provided, run all tests From c643ebb26dac484e56aea7b5f30d97fe7711f6f3 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Sun, 2 Feb 2014 09:16:20 +0000 Subject: [PATCH 0417/4438] XenAPI: Fix new useage of trueorfalse * Ensure that Xen setup scripts will continue to function when unset variables are used in stackrc * Ensure that the generic functions are sourced in all places that xenrc (which sources stackrc) is sourced. Change-Id: I54eba20733c2e149621b74a1387f0bef14fca12e --- tools/xen/build_xva.sh | 10 ++++++++++ tools/xen/prepare_guest_template.sh | 10 ++++++++++ tools/xen/xenrc | 3 +++ 3 files changed, 23 insertions(+) diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index 958102b29c..fbbfd6fbe5 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -21,9 +21,19 @@ set -o xtrace # This directory TOP_DIR=$(cd $(dirname "$0") && pwd) +# Source lower level functions +. $TOP_DIR/../../functions + # Include onexit commands . $TOP_DIR/scripts/on_exit.sh +# xapi functions +. $TOP_DIR/functions + +# Determine what system we are running on. +# Might not be XenServer if we're using xenserver-core +GetDistro + # Source params - override xenrc params in your localrc to suite your taste source xenrc diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh index 546ac99cd9..4fa70d377d 100755 --- a/tools/xen/prepare_guest_template.sh +++ b/tools/xen/prepare_guest_template.sh @@ -22,9 +22,19 @@ set -o xtrace # This directory TOP_DIR=$(cd $(dirname "$0") && pwd) +# Source lower level functions +. $TOP_DIR/../../functions + # Include onexit commands . $TOP_DIR/scripts/on_exit.sh +# xapi functions +. $TOP_DIR/functions + +# Determine what system we are running on. 
+# Might not be XenServer if we're using xenserver-core +GetDistro + # Source params - override xenrc params in your localrc to suite your taste source xenrc diff --git a/tools/xen/xenrc b/tools/xen/xenrc index cd282341cb..96f3734a1d 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -91,4 +91,7 @@ UBUNTU_INST_GATEWAY="" # Set the size to 0 to avoid creation of additional disk. XEN_XVDB_SIZE_GB=0 +restore_nounset=`set +o | grep nounset` +set +u source ../../stackrc +$restore_nounset From ca920576cb9c36b7d26a3ce523c9d9a25b3f5db8 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 3 Feb 2014 15:26:20 +0100 Subject: [PATCH 0418/4438] nova: use the correct notification driver Nova now uses oslo.messaging and not the Oslo RPC code anymore, therefore the new driver should be used instead. Change-Id: I3533975ad38ff99bee6cfaa5332843444650f61f --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index d5f7514be5..722b994896 100644 --- a/lib/nova +++ b/lib/nova @@ -447,7 +447,7 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT instance_usage_audit "True" iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour" iniset $NOVA_CONF DEFAULT notify_on_state_change "vm_and_task_state" - iniset $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier" + iniset $NOVA_CONF DEFAULT notification_driver "messaging" fi # Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS`` From a03607d03f18fbe842bb61a509a868e1447fc379 Mon Sep 17 00:00:00 2001 From: Ivar Lazzaro Date: Mon, 3 Feb 2014 06:28:14 -0800 Subject: [PATCH 0419/4438] Embrane Plugin Support Implements blueprint embrane-plugin-support This commit implements Embrane's Neutron plugin installation support in Devstack. 
This is an extension of the openvswitch installation module, which is used by the main plugin, and enables configuration by localrc Change-Id: Ia4824f8d2300bcdce170d226145bbce6088f1557 --- lib/neutron_plugins/embrane | 40 +++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 lib/neutron_plugins/embrane diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane new file mode 100644 index 0000000000..4206a2053c --- /dev/null +++ b/lib/neutron_plugins/embrane @@ -0,0 +1,40 @@ +# Neutron Embrane plugin +# --------------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/neutron_plugins/openvswitch + +save_function() { + local ORIG_FUNC=$(declare -f $1) + local NEW_FUNC="$2${ORIG_FUNC#$1}" + eval "$NEW_FUNC" +} + +save_function neutron_plugin_configure_service _neutron_plugin_configure_service + +function neutron_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/embrane + Q_PLUGIN_CONF_FILENAME=heleos_conf.ini + Q_DB_NAME="ovs_neutron" + Q_PLUGIN_CLASS="neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin" +} + +function neutron_plugin_configure_service() { + _neutron_plugin_configure_service + iniset /$Q_PLUGIN_CONF_FILE heleos esm_mgmt $HELEOS_ESM_MGMT + iniset /$Q_PLUGIN_CONF_FILE heleos admin_username $HELEOS_ADMIN_USERNAME + iniset /$Q_PLUGIN_CONF_FILE heleos admin_password $HELEOS_ADMIN_PASSWORD + iniset /$Q_PLUGIN_CONF_FILE heleos router_image $HELEOS_ROUTER_IMAGE + iniset /$Q_PLUGIN_CONF_FILE heleos mgmt_id $HELEOS_MGMT_ID + iniset /$Q_PLUGIN_CONF_FILE heleos inband_id $HELEOS_INBAND_ID + iniset /$Q_PLUGIN_CONF_FILE heleos oob_id $HELEOS_OOB_ID + iniset /$Q_PLUGIN_CONF_FILE heleos dummy_utif_id $HELEOS_DUMMY_UTIF_ID + iniset /$Q_PLUGIN_CONF_FILE heleos resource_pool_id $HELEOS_RESOURCE_POOL_ID + iniset /$Q_PLUGIN_CONF_FILE heleos async_requests $HELEOS_ASYNC_REQUESTS +} + +# Restore xtrace +$MY_XTRACE \ No newline at end of 
file From 0656e12d6819f6dee671dd6200b2d0895e716c2c Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 3 Feb 2014 08:49:30 +0900 Subject: [PATCH 0420/4438] add ability to ignore rules in bash8 Change-Id: Ia6472f4bb251bf3e9846e08e30b2f9ea30ea1c03 --- tools/bash8.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/tools/bash8.py b/tools/bash8.py index edf7da4645..2623358182 100755 --- a/tools/bash8.py +++ b/tools/bash8.py @@ -30,8 +30,18 @@ import re import sys - ERRORS = 0 +IGNORE = None + + +def register_ignores(ignores): + global IGNORE + if ignores: + IGNORE='^(' + '|'.join(ignores.split(',')) + ')' + + +def should_ignore(error): + return IGNORE and re.search(IGNORE, error) def print_error(error, line): @@ -97,11 +107,13 @@ def get_options(): description='A bash script style checker') parser.add_argument('files', metavar='file', nargs='+', help='files to scan for errors') + parser.add_argument('-i', '--ignore', help='Rules to ignore') return parser.parse_args() def main(): opts = get_options() + register_ignores(opts.ignore) check_files(opts.files) if ERRORS > 0: From 864902ed01f92a9f587ebf0b582357fe2a9ea086 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Mon, 3 Feb 2014 21:00:39 +0000 Subject: [PATCH 0421/4438] Use github for swift3. swift3 is not on OpenStack infra (yet) use the github url instead. 
Closes-Bug: #1275923 Change-Id: I0cc393f93b65dcf8642b3a35925eb9eba3c2e1eb --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 8a0280ecfa..b138f42546 100644 --- a/stackrc +++ b/stackrc @@ -162,7 +162,7 @@ REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-master} # storage service SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git} SWIFT_BRANCH=${SWIFT_BRANCH:-master} -SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/fujita/swift3.git} +SWIFT3_REPO=${SWIFT3_REPO:-http://github.com/fujita/swift3.git} SWIFT3_BRANCH=${SWIFT3_BRANCH:-master} # python swift client library From 891277fbbdf65427b43f194adaafbbf2a4ac4800 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Mon, 3 Feb 2014 21:07:03 +0000 Subject: [PATCH 0422/4438] s3_token has been moved to keystoneclient. Change-Id: I6ffe756d517d11f323bd0c5d3b877d9a9f739a3b --- lib/swift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/swift b/lib/swift index 37b630c3fa..a182e5adfb 100644 --- a/lib/swift +++ b/lib/swift @@ -336,7 +336,7 @@ function configure_swift() { # NOTE(chmou): s3token middleware is not updated yet to use only # username and password. [filter:s3token] -paste.filter_factory = keystone.middleware.s3_token:filter_factory +paste.filter_factory = keystoneclient.middleware.s3_token:filter_factory auth_port = ${KEYSTONE_AUTH_PORT} auth_host = ${KEYSTONE_AUTH_HOST} auth_protocol = ${KEYSTONE_AUTH_PROTOCOL} From f36a9b2136b4ba56ac2989f7829c55b4eb1c08af Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Mon, 3 Feb 2014 23:44:47 +0100 Subject: [PATCH 0423/4438] No need to loop over with pkill -f I guess four times is better than one but if we need four times to kill swift processes there is something pretty bad with it. 
Change-Id: Id2ea2f4ca60feb9fddc7b3181063760d2044b421 --- lib/swift | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/swift b/lib/swift index 54d6f1c2e6..28ca8a80df 100644 --- a/lib/swift +++ b/lib/swift @@ -657,10 +657,8 @@ function stop_swift() { if type -p swift-init >/dev/null; then swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true fi - for type in proxy object container account; do - # Dump all of the servers - pkill -f swift- - done + # Dump all of the servers + pkill -f swift- } # Restore xtrace From 8dad4bde886ed2a5bb28d8eb43cfa874ee81c790 Mon Sep 17 00:00:00 2001 From: Arnaud Legendre Date: Mon, 3 Feb 2014 17:57:39 -0800 Subject: [PATCH 0424/4438] upload_image.sh to support streamOptimized disks The current version of the script will use "preallocated" as the disk type of a stream optimized disk. This needs to be fixed by introspecting the createType of the vmdk file. Closes-Bug: #1275993 Change-Id: I98594acecf26dd1164870f43890254a19ef23fe9 --- functions | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/functions b/functions index 73d65ce15b..281b6767c5 100644 --- a/functions +++ b/functions @@ -1450,7 +1450,7 @@ function upload_image() { # vmdk disk type vmdk_create_type="$(head -25 $IMAGE | grep -a -F -m 1 'createType=' $IMAGE)" vmdk_create_type="${vmdk_create_type#*\"}" - vmdk_create_type="${vmdk_create_type%?}" + vmdk_create_type="${vmdk_create_type%\"*}" descriptor_data_pair_msg="Monolithic flat and VMFS disks "` `"should use a descriptor-data pair." 
@@ -1495,6 +1495,8 @@ function upload_image() { IMAGE_NAME="${flat_fname}" fi vmdk_disktype="preallocated" + elif [[ "$vmdk_create_type" = "streamOptimized" ]]; then + vmdk_disktype="streamOptimized" elif [[ -z "$vmdk_create_type" ]]; then # *-flat.vmdk provided: attempt to retrieve the descriptor (*.vmdk) # to retrieve appropriate metadata @@ -1533,10 +1535,8 @@ function upload_image() { vmdk_adapter_type="${vmdk_adapter_type%?}" fi fi - #TODO(alegendre): handle streamOptimized once supported by the VMware driver. vmdk_disktype="preallocated" else - #TODO(alegendre): handle streamOptimized once supported by the VMware driver. vmdk_disktype="preallocated" fi From d70ba82b14b0c47fd87a957e9f2ca5ddda69948b Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Tue, 4 Feb 2014 14:33:27 +1300 Subject: [PATCH 0425/4438] Move file injection setting to the right place The nova code was wiping nova.conf after our iniset :(. Change-Id: Ib618da1bd21da09f8855ec4691bff79c4c3b3d9c --- lib/nova | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/nova b/lib/nova index 0db242a34a..dbc5c3db44 100644 --- a/lib/nova +++ b/lib/nova @@ -240,10 +240,6 @@ function configure_nova() { sudo sysctl -w net.ipv4.ip_forward=1 if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - # File injection is being disabled by default in the near future - - # disable it here for now to avoid surprises later. - iniset $NOVA_CONF libvirt inject_partition '-2' - # Check for kvm (hardware based virtualization). If unable to initialize # kvm, we drop back to the slower emulation mode (qemu). Note: many systems # come with hardware virtualization disabled in BIOS. 
@@ -499,6 +495,12 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" iniset_rpc_backend nova $NOVA_CONF DEFAULT iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" + + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + # File injection is being disabled by default in the near future - + # disable it here for now to avoid surprises later. + iniset $NOVA_CONF libvirt inject_partition '-2' + fi } function init_nova_cells() { From b408dd2072462f47ae294b601039c12136034e5e Mon Sep 17 00:00:00 2001 From: Denis Makogon Date: Tue, 4 Feb 2014 12:58:59 +0200 Subject: [PATCH 0426/4438] Remove unneeded guest conf values Reasons: - guest service doesn't depend on "sql_connection" value any more; - "exchange_control" already set in trove-guestagent.conf.sample to "trove"; Change-Id: Ifbdb21ac4639d86cf7775634f5b31cfb9739b49f Closes-Bug: #1256046 --- lib/trove | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/trove b/lib/trove index 2000446b13..bb4549121d 100644 --- a/lib/trove +++ b/lib/trove @@ -148,8 +148,6 @@ function configure_trove() { iniset $TROVE_CONF_DIR/trove.conf DEFAULT add_addresses True iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT sql_connection `database_connection_url trove` - iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT control_exchange trove sed -i "s/localhost/$NETWORK_GATEWAY/g" $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample setup_trove_logging $TROVE_CONF_DIR/trove.conf From db1c3847752c84a9fc06186a3352f02b76c1aa7c Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Tue, 4 Feb 2014 20:58:00 +0000 Subject: [PATCH 0427/4438] Fix config group for cinder multi_backend This commit just updates the config group for the multi_backend option. 
Tempest change d5c9602b created a volume-feature-enabled group and moved this volume there but devstack was never updated with the change. Closes-Bug: #1276326 Change-Id: Icf2e96783feec4edbd4d477f8492651cd9bb3f01 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 06183b107b..6fa35d19fd 100644 --- a/lib/tempest +++ b/lib/tempest @@ -323,7 +323,7 @@ function configure_tempest() { fi CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) if [ $CINDER_MULTI_LVM_BACKEND == "True" ]; then - iniset $TEMPEST_CONFIG volume multi_backend_enabled "True" + iniset $TEMPEST_CONFIG volume-feature-enabled multi_backend "True" iniset $TEMPEST_CONFIG volume backend1_name "LVM_iSCSI" iniset $TEMPEST_CONFIG volume backend2_name "LVM_iSCSI_2" fi From 41e36d6bcd3ab04cd3955aef68162c3266dc958e Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Tue, 4 Feb 2014 13:39:32 -0800 Subject: [PATCH 0428/4438] Replace NvpPluginV2 with NsxPlugin The king is dead, long live the king! 
Partial-implements blueprint: nicira-plugin-renaming Change-Id: I9b71479a8d4228d45a6591b169c489c0107fb04c --- lib/neutron_plugins/vmware_nsx | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/neutron_plugins/vmware_nsx b/lib/neutron_plugins/vmware_nsx index d506cb6f8d..74f98df577 100644 --- a/lib/neutron_plugins/vmware_nsx +++ b/lib/neutron_plugins/vmware_nsx @@ -41,8 +41,7 @@ function neutron_plugin_configure_common() { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware Q_PLUGIN_CONF_FILENAME=nsx.ini Q_DB_NAME="neutron_nsx" - # TODO(armando-migliaccio): rename this once the code rename is complete - Q_PLUGIN_CLASS="neutron.plugins.nicira.NeutronPlugin.NvpPluginV2" + Q_PLUGIN_CLASS="neutron.plugins.vmware.plugin.NsxPlugin" } function neutron_plugin_configure_debug_command() { From 1023ff7c3ac184da00b6306f361f285301849881 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 27 Jan 2014 14:56:44 -0600 Subject: [PATCH 0429/4438] Move ironic to plugin Also adds an is_ironic_enabled() function to prepare for an upcoming change in is_service_enabled(). 
Change-Id: I6e6e0e8b70221e231785ab27e9b5d4836933ac4c --- extras.d/50-ironic.sh | 33 +++++++++++++++++++++++++++++++++ lib/ironic | 7 +++++++ stack.sh | 21 --------------------- unstack.sh | 7 ------- 4 files changed, 40 insertions(+), 28 deletions(-) create mode 100644 extras.d/50-ironic.sh diff --git a/extras.d/50-ironic.sh b/extras.d/50-ironic.sh new file mode 100644 index 0000000000..f68a14680f --- /dev/null +++ b/extras.d/50-ironic.sh @@ -0,0 +1,33 @@ +# ironic.sh - Devstack extras script to install ironic + +if is_service_enabled ir-api ir-cond; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/ironic + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing Ironic" + install_ironic + install_ironicclient + cleanup_ironic + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + echo_summary "Configuring Ironic" + configure_ironic + + if is_service_enabled key; then + create_ironic_accounts + fi + + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + # Initialize ironic + init_ironic + + # Start the ironic API and ironic taskmgr components + echo_summary "Starting Ironic" + start_ironic + fi + + if [[ "$1" == "unstack" ]]; then + stop_ironic + cleanup_ironic + fi +fi diff --git a/lib/ironic b/lib/ironic index afbc3e09e4..afb7c23d2c 100644 --- a/lib/ironic +++ b/lib/ironic @@ -46,6 +46,13 @@ IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST:6385} # Functions # --------- +# Test if any Ironic services are enabled +# is_ironic_enabled +function is_ironic_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"ir-" ]] && return 0 + return 1 +} + # install_ironic() - Collect source and prepare function install_ironic() { git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH diff --git a/stack.sh b/stack.sh index 45d47c819c..a1cf595cf0 100755 --- a/stack.sh +++ b/stack.sh @@ -336,7 +336,6 @@ source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap -source $TOP_DIR/lib/ironic 
# Extras Source # -------------- @@ -746,11 +745,6 @@ if is_service_enabled tls-proxy; then # don't be naive and add to existing line! fi -if is_service_enabled ir-api ir-cond; then - install_ironic - install_ironicclient - configure_ironic -fi # Extras Install # -------------- @@ -966,15 +960,6 @@ if is_service_enabled g-reg; then fi -# Ironic -# ------ - -if is_service_enabled ir-api ir-cond; then - echo_summary "Configuring Ironic" - init_ironic -fi - - # Neutron # ------- @@ -1101,12 +1086,6 @@ if is_service_enabled g-api g-reg; then start_glance fi -# Launch the Ironic services -if is_service_enabled ir-api ir-cond; then - echo_summary "Starting Ironic" - start_ironic -fi - # Create an access key and secret key for nova ec2 register image if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1) diff --git a/unstack.sh b/unstack.sh index 92d0642c38..ea9c27d99b 100755 --- a/unstack.sh +++ b/unstack.sh @@ -55,7 +55,6 @@ source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap -source $TOP_DIR/lib/ironic # Extras Source # -------------- @@ -118,12 +117,6 @@ if is_service_enabled s-proxy; then cleanup_swift fi -# Ironic runs daemons -if is_service_enabled ir-api ir-cond; then - stop_ironic - cleanup_ironic -fi - # Apache has the WSGI processes if is_service_enabled horizon; then stop_horizon From 75dbd9b1a3d6fa7d72b95d72a3102d8fdc76fd34 Mon Sep 17 00:00:00 2001 From: "Walter A. 
Boring IV" Date: Tue, 4 Feb 2014 14:56:15 -0800 Subject: [PATCH 0430/4438] Added the import of lib/infra This fixes an error in the devstack/functions setup_develop call, which tries to cd to $REQUIREMENTS_DIR, which is created in lib/infra Change-Id: Ie65d2ba83547acc4ea36d1191e6e90dc21da1fa7 Closes-Bug: #1276365 --- driver_certs/cinder_driver_cert.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh index edcc6d4800..0221e3779c 100755 --- a/driver_certs/cinder_driver_cert.sh +++ b/driver_certs/cinder_driver_cert.sh @@ -24,6 +24,7 @@ TOP_DIR=$(cd $CERT_DIR/..; pwd) source $TOP_DIR/functions source $TOP_DIR/stackrc source $TOP_DIR/openrc +source $TOP_DIR/lib/infra source $TOP_DIR/lib/tempest source $TOP_DIR/lib/cinder From 16dd8b3ed94d5cd217d22a26c18dca52bfca115e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 3 Feb 2014 09:10:54 +0900 Subject: [PATCH 0431/4438] introduce if/then & for/do rules we mostly have a consistent style on if/then & for/do in devstack, except when we don't. This attempts to build a set of rules to enforce this. Because there are times when lines are legitimately long, and there is a continuation, this starts off ignoring if and for loops with continuations. But for short versions, we should enforce this. Changes to make devstack pass are included. The fact that the cleanup patch was so small is pretty solid reason that this is actually the style we've all agreed to. Part of a git stash from hong kong that I finally cleaned up. 
Change-Id: I6376d7afd59cc5ebba9ed69e5ee784a3d5934a10 --- lib/baremetal | 3 +-- lib/heat | 3 +-- lib/neutron_plugins/bigswitch_floodlight | 6 ++--- lib/neutron_plugins/nec | 3 +-- lib/neutron_thirdparty/bigswitch_floodlight | 3 +-- stack.sh | 4 +-- tests/functions.sh | 12 +++------ tools/bash8.py | 29 +++++++++++++++++++++ tools/xen/install_os_domU.sh | 3 +-- tools/xen/scripts/install-os-vpx.sh | 9 +++---- tools/xen/scripts/on_exit.sh | 6 ++--- tools/xen/test_functions.sh | 6 ++--- 12 files changed, 49 insertions(+), 38 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index a0df85e700..d8cd7e936c 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -431,8 +431,7 @@ function upload_baremetal_image() { function clear_baremetal_of_all_nodes() { list=$(nova baremetal-node-list | awk -F '| ' 'NR>3 {print $2}' ) - for node in $list - do + for node in $list; do nova baremetal-node-delete $node done } diff --git a/lib/heat b/lib/heat index f171cb450c..9f5dd8b588 100644 --- a/lib/heat +++ b/lib/heat @@ -186,8 +186,7 @@ function disk_image_create { local elements=$2 local arch=$3 local output=$TOP_DIR/files/$4 - if [[ -f "$output.qcow2" ]]; - then + if [[ -f "$output.qcow2" ]]; then echo "Image file already exists: $output_file" else ELEMENTS_PATH=$elements_path disk-image-create \ diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index 93ec497bb9..1e4aa00121 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -44,16 +44,14 @@ function neutron_plugin_configure_plugin_agent() { function neutron_plugin_configure_service() { iniset /$Q_PLUGIN_CONF_FILE restproxy servers $BS_FL_CONTROLLERS_PORT iniset /$Q_PLUGIN_CONF_FILE restproxy servertimeout $BS_FL_CONTROLLER_TIMEOUT - if [ "$BS_FL_VIF_DRIVER" = "ivs" ] - then + if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then iniset /$Q_PLUGIN_CONF_FILE nova vif_type ivs fi } function neutron_plugin_setup_interface_driver() { local conf_file=$1 - 
if [ "$BS_FL_VIF_DRIVER" = "ivs" ] - then + if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.IVSInterfaceDriver else iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec index d8d8b7ce7e..1cb2fef533 100644 --- a/lib/neutron_plugins/nec +++ b/lib/neutron_plugins/nec @@ -106,8 +106,7 @@ function _neutron_setup_ovs_tunnels() { local id=0 GRE_LOCAL_IP=${GRE_LOCAL_IP:-$HOST_IP} if [ -n "$GRE_REMOTE_IPS" ]; then - for ip in ${GRE_REMOTE_IPS//:/ } - do + for ip in ${GRE_REMOTE_IPS//:/ }; do if [[ "$ip" == "$GRE_LOCAL_IP" ]]; then continue fi diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight index 1fd4fd801a..24c10443b7 100644 --- a/lib/neutron_thirdparty/bigswitch_floodlight +++ b/lib/neutron_thirdparty/bigswitch_floodlight @@ -24,8 +24,7 @@ function init_bigswitch_floodlight() { sudo ovs-vsctl --no-wait br-set-external-id ${OVS_BRIDGE} bridge-id ${OVS_BRIDGE} ctrls= - for ctrl in `echo ${BS_FL_CONTROLLERS_PORT} | tr ',' ' '` - do + for ctrl in `echo ${BS_FL_CONTROLLERS_PORT} | tr ',' ' '`; do ctrl=${ctrl%:*} ctrls="${ctrls} tcp:${ctrl}:${BS_FL_OF_PORT}" done diff --git a/stack.sh b/stack.sh index 45d47c819c..15e14303cf 100755 --- a/stack.sh +++ b/stack.sh @@ -1124,8 +1124,8 @@ fi # Create a randomized default value for the keymgr's fixed_key if is_service_enabled nova; then FIXED_KEY="" - for i in $(seq 1 64); - do FIXED_KEY+=$(echo "obase=16; $(($RANDOM % 16))" | bc); + for i in $(seq 1 64); do + FIXED_KEY+=$(echo "obase=16; $(($RANDOM % 16))" | bc); done; iniset $NOVA_CONF keymgr fixed_key "$FIXED_KEY" fi diff --git a/tests/functions.sh b/tests/functions.sh index 95dafe1028..06a4134abf 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -49,8 +49,7 @@ function test_enable_service() { ENABLED_SERVICES="$start" enable_service $add - if [ 
"$ENABLED_SERVICES" = "$finish" ] - then + if [ "$ENABLED_SERVICES" = "$finish" ]; then echo "OK: $start + $add -> $ENABLED_SERVICES" else echo "changing $start to $finish with $add failed: $ENABLED_SERVICES" @@ -76,8 +75,7 @@ function test_disable_service() { ENABLED_SERVICES="$start" disable_service "$del" - if [ "$ENABLED_SERVICES" = "$finish" ] - then + if [ "$ENABLED_SERVICES" = "$finish" ]; then echo "OK: $start - $del -> $ENABLED_SERVICES" else echo "changing $start to $finish with $del failed: $ENABLED_SERVICES" @@ -102,8 +100,7 @@ echo "Testing disable_all_services()" ENABLED_SERVICES=a,b,c disable_all_services -if [[ -z "$ENABLED_SERVICES" ]] -then +if [[ -z "$ENABLED_SERVICES" ]]; then echo "OK" else echo "disabling all services FAILED: $ENABLED_SERVICES" @@ -118,8 +115,7 @@ function test_disable_negated_services() { ENABLED_SERVICES="$start" disable_negated_services - if [ "$ENABLED_SERVICES" = "$finish" ] - then + if [ "$ENABLED_SERVICES" = "$finish" ]; then echo "OK: $start + $add -> $ENABLED_SERVICES" else echo "changing $start to $finish failed: $ENABLED_SERVICES" diff --git a/tools/bash8.py b/tools/bash8.py index 2623358182..9fb51ecc9e 100755 --- a/tools/bash8.py +++ b/tools/bash8.py @@ -21,9 +21,19 @@ # Currently Supported checks # # Errors +# Basic white space errors, for consistent indenting # - E001: check that lines do not end with trailing whitespace # - E002: ensure that indents are only spaces, and not hard tabs # - E003: ensure all indents are a multiple of 4 spaces +# +# Structure errors +# +# A set of rules that help keep things consistent in control blocks. 
+# These are ignored on long lines that have a continuation, because +# unrolling that is kind of "interesting" +# +# - E010: *do* not on the same line as *for* +# - E011: *then* not on the same line as *if* import argparse import fileinput @@ -51,6 +61,23 @@ def print_error(error, line): print(" - %s: L%s" % (fileinput.filename(), fileinput.filelineno())) +def not_continuation(line): + return not re.search('\\\\$', line) + +def check_for_do(line): + if not_continuation(line): + if re.search('^\s*for ', line): + if not re.search(';\s*do(\b|$)', line): + print_error('E010: Do not on same line as for', line) + + +def check_if_then(line): + if not_continuation(line): + if re.search('^\s*if \[', line): + if not re.search(';\s*then(\b|$)', line): + print_error('E011: Then non on same line as if', line) + + def check_no_trailing_whitespace(line): if re.search('[ \t]+$', line): print_error('E001: Trailing Whitespace', line) @@ -100,6 +127,8 @@ def check_files(files): check_no_trailing_whitespace(logical_line) check_indents(logical_line) + check_for_do(logical_line) + check_if_then(logical_line) def get_options(): diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 41b184c6ac..d172c7ba1b 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -194,8 +194,7 @@ function wait_for_VM_to_halt() { while true do state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted) - if [ -n "$state" ] - then + if [ -n "$state" ]; then break else echo -n "." 
diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index 7b0d891493..8412fdc3ca 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -63,8 +63,7 @@ get_params() ;; esac done - if [[ -z $BRIDGE ]] - then + if [[ -z $BRIDGE ]]; then BRIDGE=xenbr0 fi @@ -91,8 +90,7 @@ xe_min() find_network() { result=$(xe_min network-list bridge="$1") - if [ "$result" = "" ] - then + if [ "$result" = "" ]; then result=$(xe_min network-list name-label="$1") fi echo "$result" @@ -121,8 +119,7 @@ destroy_vifs() { local v="$1" IFS=, - for vif in $(xe_min vif-list vm-uuid="$v") - do + for vif in $(xe_min vif-list vm-uuid="$v"); do xe vif-destroy uuid="$vif" done unset IFS diff --git a/tools/xen/scripts/on_exit.sh b/tools/xen/scripts/on_exit.sh index a4db39c225..2441e3d84a 100755 --- a/tools/xen/scripts/on_exit.sh +++ b/tools/xen/scripts/on_exit.sh @@ -7,8 +7,7 @@ declare -a on_exit_hooks on_exit() { - for i in $(seq $((${#on_exit_hooks[*]} - 1)) -1 0) - do + for i in $(seq $((${#on_exit_hooks[*]} - 1)) -1 0); do eval "${on_exit_hooks[$i]}" done } @@ -17,8 +16,7 @@ add_on_exit() { local n=${#on_exit_hooks[*]} on_exit_hooks[$n]="$*" - if [[ $n -eq 0 ]] - then + if [[ $n -eq 0 ]]; then trap on_exit EXIT fi } diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh index 373d996760..838f86a525 100755 --- a/tools/xen/test_functions.sh +++ b/tools/xen/test_functions.sh @@ -227,16 +227,14 @@ function test_get_local_sr_path { } [ "$1" = "run_tests" ] && { - for testname in $($0) - do + for testname in $($0); do echo "$testname" before_each_test ( set -eux $testname ) - if [ "$?" != "0" ] - then + if [ "$?" 
!= "0" ]; then echo "FAIL" exit 1 else From 86a8e9767912ae957cbbf6ea20a08106011a7728 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 4 Feb 2014 15:20:15 +0100 Subject: [PATCH 0432/4438] Add while/until to the for/do rule Like 'for/do' check that the while/until operator are on the same line with the do. Fixes some pep8 error along the way. Change-Id: I440afe60691263365bf35310bf4212d94f30c339 --- tools/bash8.py | 10 +++++++--- tools/create_userrc.sh | 3 +-- tools/xen/install_os_domU.sh | 3 +-- tools/xen/scripts/install-os-vpx.sh | 3 +-- 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/tools/bash8.py b/tools/bash8.py index 9fb51ecc9e..7552e0d642 100755 --- a/tools/bash8.py +++ b/tools/bash8.py @@ -47,7 +47,7 @@ def register_ignores(ignores): global IGNORE if ignores: - IGNORE='^(' + '|'.join(ignores.split(',')) + ')' + IGNORE = '^(' + '|'.join(ignores.split(',')) + ')' def should_ignore(error): @@ -64,11 +64,15 @@ def print_error(error, line): def not_continuation(line): return not re.search('\\\\$', line) + def check_for_do(line): if not_continuation(line): - if re.search('^\s*for ', line): + match = re.match('^\s*(for|while|until)\s', line) + if match: + operator = match.group(1).strip() if not re.search(';\s*do(\b|$)', line): - print_error('E010: Do not on same line as for', line) + print_error('E010: Do not on same line as %s' % operator, + line) def check_if_then(line): diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index 5f4c48660b..e2d855c4df 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -71,8 +71,7 @@ MODE="" ROLE=Member USER_NAME="" USER_PASS="" -while [ $# -gt 0 ] -do +while [ $# -gt 0 ]; do case "$1" in -h|--help) display_help; exit 0 ;; --os-username) export OS_USERNAME=$2; shift ;; diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index d172c7ba1b..d0d81a2d7e 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -191,8 +191,7 @@ function 
wait_for_VM_to_halt() { domid=$(xe vm-list name-label="$GUEST_NAME" params=dom-id minimal=true) port=$(xenstore-read /local/domain/$domid/console/vnc-port) echo "vncviewer -via root@$mgmt_ip localhost:${port:2}" - while true - do + while true; do state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted) if [ -n "$state" ]; then break diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index 8412fdc3ca..b9b65fdce2 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -42,8 +42,7 @@ EOF get_params() { - while getopts "hbn:r:l:t:" OPTION; - do + while getopts "hbn:r:l:t:" OPTION; do case $OPTION in h) usage exit 1 From d15c8a082464695a4e715bab093bf4d876bbc341 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Tue, 4 Feb 2014 12:38:14 +0000 Subject: [PATCH 0433/4438] Move install responsibilities to domU As we are moving forward to test XenAPI in the gate, it is necessary to move dom0 related modifications to be performed from domU. For this purpose, a new user is created, and that user should be used to talk to dom0 from domU. This change creates that user, makes it possible for dom0 to log in to domU with that account, and configure that account to be able to talk down to dom0. Also move several steps to the nova xenserver plugin: - dom0 plugin installation - create kernels and images directory - install console rotate script - configure a cron to execute console rotate script Configuration changes: A new configuration option, DOMZERO_USER has been created, that specifies a user account that is configured to be able to do passwordless ssh to dom0. 
Change-Id: If9de0b297a67b7cdb5de78d8dd0e8b2ca578b601 --- lib/nova_plugins/hypervisor-xenserver | 28 ++++++++++++++ stackrc | 4 ++ tools/xen/functions | 8 ++++ tools/xen/install_os_domU.sh | 51 ++++++++++++++++---------- tools/xen/prepare_guest.sh | 53 +++++++++++++++++++++++++++ tools/xen/prepare_guest_template.sh | 2 +- 6 files changed, 126 insertions(+), 20 deletions(-) diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver index f47994f187..9843261065 100644 --- a/lib/nova_plugins/hypervisor-xenserver +++ b/lib/nova_plugins/hypervisor-xenserver @@ -56,6 +56,34 @@ function configure_nova_hypervisor() { # Need to avoid crash due to new firewall support XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER" + + local dom0_ip + dom0_ip=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3-) + + local ssh_dom0 + ssh_dom0="sudo -u $DOMZERO_USER ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$dom0_ip" + + # install nova plugins to dom0 + tar -czf - -C $NOVA_DIR/plugins/xenserver/xenapi/etc/xapi.d/plugins/ ./ | + $ssh_dom0 'tar -xzf - -C /etc/xapi.d/plugins/ && chmod a+x /etc/xapi.d/plugins/*' + + # install console logrotate script + tar -czf - -C $NOVA_DIR/tools/xenserver/ rotate_xen_guest_logs.sh | + $ssh_dom0 'tar -xzf - -C /root/ && chmod +x /root/rotate_xen_guest_logs.sh && mkdir -p /var/log/xen/guest' + + # Create a cron job that will rotate guest logs + $ssh_dom0 crontab - << CRONTAB +* * * * * /root/rotate_xen_guest_logs.sh +CRONTAB + + # Create directories for kernels and images + { + echo "set -eux" + cat $TOP_DIR/tools/xen/functions + echo "create_directory_for_images" + echo "create_directory_for_kernels" + } | $ssh_dom0 + } # install_nova_hypervisor() - Install external components diff --git a/stackrc b/stackrc index e89d25e4ab..db5b1889af 100644 --- a/stackrc +++ b/stackrc @@ -245,6 +245,10 @@ case 
"$VIRT_DRIVER" in xenserver) # Xen config common to nova and neutron XENAPI_USER=${XENAPI_USER:-"root"} + # This user will be used for dom0 - domU communication + # should be able to log in to dom0 without a password + # will be used to install the plugins + DOMZERO_USER=${DOMZERO_USER:-"domzero"} ;; *) ;; diff --git a/tools/xen/functions b/tools/xen/functions index 97c56bc1af..ab0be84bd2 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -336,3 +336,11 @@ function max_vcpus() { xe vm-param-set uuid=$vm VCPUs-max=$cpu_count xe vm-param-set uuid=$vm VCPUs-at-startup=$cpu_count } + +function get_domid() { + local vm_name_label + + vm_name_label="$1" + + xe vm-list name-label="$vm_name_label" params=dom-id minimal=true +} diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 41b184c6ac..663f09c1b4 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -67,21 +67,6 @@ fi # Install plugins -## Nova plugins -NOVA_ZIPBALL_URL=${NOVA_ZIPBALL_URL:-$(zip_snapshot_location $NOVA_REPO $NOVA_BRANCH)} -EXTRACTED_NOVA=$(extract_remote_zipball "$NOVA_ZIPBALL_URL") -install_xapi_plugins_from "$EXTRACTED_NOVA" - -LOGROT_SCRIPT=$(find "$EXTRACTED_NOVA" -name "rotate_xen_guest_logs.sh" -print) -if [ -n "$LOGROT_SCRIPT" ]; then - mkdir -p "/var/log/xen/guest" - cp "$LOGROT_SCRIPT" /root/consolelogrotate - chmod +x /root/consolelogrotate - echo "* * * * * /root/consolelogrotate" | crontab -fi - -rm -rf "$EXTRACTED_NOVA" - ## Install the netwrap xapi plugin to support agent control of dom0 networking if [[ "$ENABLED_SERVICES" =~ "q-agt" && "$Q_PLUGIN" = "openvswitch" ]]; then NEUTRON_ZIPBALL_URL=${NEUTRON_ZIPBALL_URL:-$(zip_snapshot_location $NEUTRON_REPO $NEUTRON_BRANCH)} @@ -90,9 +75,6 @@ if [[ "$ENABLED_SERVICES" =~ "q-agt" && "$Q_PLUGIN" = "openvswitch" ]]; then rm -rf "$EXTRACTED_NEUTRON" fi -create_directory_for_kernels -create_directory_for_images - # # Configure Networking # @@ -188,7 +170,7 @@ function 
wait_for_VM_to_halt() { set +x echo "Waiting for the VM to halt. Progress in-VM can be checked with vncviewer:" mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.') - domid=$(xe vm-list name-label="$GUEST_NAME" params=dom-id minimal=true) + domid=$(get_domid "$GUEST_NAME") port=$(xenstore-read /local/domain/$domid/console/vnc-port) echo "vncviewer -via root@$mgmt_ip localhost:${port:2}" while true @@ -361,6 +343,37 @@ else fi fi +# Create an ssh-keypair, and set it up for dom0 user +rm -f /root/dom0key /root/dom0key.pub +ssh-keygen -f /root/dom0key -P "" -C "dom0" +DOMID=$(get_domid "$GUEST_NAME") + +xenstore-write /local/domain/$DOMID/authorized_keys/$DOMZERO_USER "$(cat /root/dom0key.pub)" +xenstore-chmod -u /local/domain/$DOMID/authorized_keys/$DOMZERO_USER r$DOMID + +function run_on_appliance() { + ssh \ + -i /root/dom0key \ + -o UserKnownHostsFile=/dev/null \ + -o StrictHostKeyChecking=no \ + -o BatchMode=yes \ + "$DOMZERO_USER@$OS_VM_MANAGEMENT_ADDRESS" "$@" +} + +# Wait until we can log in to the appliance +while ! 
run_on_appliance true; do + sleep 1 +done + +# Remove authenticated_keys updater cronjob +echo "" | run_on_appliance crontab - + +# Generate a passwordless ssh key for domzero user +echo "ssh-keygen -f /home/$DOMZERO_USER/.ssh/id_rsa -C $DOMZERO_USER@appliance -N \"\" -q" | run_on_appliance + +# Authenticate that user to dom0 +run_on_appliance cat /home/$DOMZERO_USER/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys + # If we have copied our ssh credentials, use ssh to monitor while the installation runs WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1} COPYENV=${COPYENV:-1} diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index 05ac86cf99..094612624b 100755 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -18,6 +18,57 @@ set -o xtrace GUEST_PASSWORD="$1" XS_TOOLS_PATH="$2" STACK_USER="$3" +DOMZERO_USER="$4" + + +function setup_domzero_user() { + local username + + username="$1" + + local key_updater_script + local sudoers_file + key_updater_script="/home/$username/update_authorized_keys.sh" + sudoers_file="/etc/sudoers.d/allow_$username" + + # Create user + adduser --disabled-password --quiet "$username" --gecos "$username" + + # Give passwordless sudo + cat > $sudoers_file << EOF + $username ALL = NOPASSWD: ALL +EOF + chmod 0440 $sudoers_file + + # A script to populate this user's authenticated_keys from xenstore + cat > $key_updater_script << EOF +#!/bin/bash +set -eux + +DOMID=\$(sudo xenstore-read domid) +sudo xenstore-exists /local/domain/\$DOMID/authorized_keys/$username +sudo xenstore-read /local/domain/\$DOMID/authorized_keys/$username > /home/$username/xenstore_value +cat /home/$username/xenstore_value > /home/$username/.ssh/authorized_keys +EOF + + # Give the key updater to the user + chown $username:$username $key_updater_script + chmod 0700 $key_updater_script + + # Setup the .ssh folder + mkdir -p /home/$username/.ssh + chown $username:$username /home/$username/.ssh + chmod 0700 /home/$username/.ssh + touch 
/home/$username/.ssh/authorized_keys + chown $username:$username /home/$username/.ssh/authorized_keys + chmod 0600 /home/$username/.ssh/authorized_keys + + # Setup the key updater as a cron job + crontab -u $username - << EOF +* * * * * $key_updater_script +EOF + +} # Install basics apt-get update @@ -48,6 +99,8 @@ useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd echo $STACK_USER:$GUEST_PASSWORD | chpasswd echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +setup_domzero_user "$DOMZERO_USER" + # Add an udev rule, so that new block devices could be written by stack user cat > /etc/udev/rules.d/50-openstack-blockdev.rules << EOF KERNEL=="xvd[b-z]", GROUP="$STACK_USER", MODE="0660" diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh index 546ac99cd9..a25535dc22 100755 --- a/tools/xen/prepare_guest_template.sh +++ b/tools/xen/prepare_guest_template.sh @@ -76,7 +76,7 @@ cp $STAGING_DIR/etc/rc.local $STAGING_DIR/etc/rc.local.preparebackup cat <$STAGING_DIR/etc/rc.local #!/bin/sh -e bash /opt/stack/prepare_guest.sh \\ - "$GUEST_PASSWORD" "$XS_TOOLS_PATH" "$STACK_USER" \\ + "$GUEST_PASSWORD" "$XS_TOOLS_PATH" "$STACK_USER" "$DOMZERO_USER" \\ > /opt/stack/prepare_guest.log 2>&1 EOF From a7a23addd3634d890a44ff3e44ebefe29a3f7910 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Wed, 5 Feb 2014 15:19:27 -0600 Subject: [PATCH 0434/4438] Update orchestration-related service names in template catalog The orchestration-related service names were not consistent with the other AWS compatibility and native API names, so this change makes them consistent. 
Related-Bug: #1240138 Change-Id: I29a26bc6b0ddab0bff579a900e28da65df097a96 --- files/default_catalog.templates | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/files/default_catalog.templates b/files/default_catalog.templates index e64f68f033..ff00e38e09 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -50,12 +50,12 @@ catalog.RegionOne.image.name = Image Service catalog.RegionOne.cloudformation.publicURL = http://%SERVICE_HOST%:8000/v1 catalog.RegionOne.cloudformation.adminURL = http://%SERVICE_HOST%:8000/v1 catalog.RegionOne.cloudformation.internalURL = http://%SERVICE_HOST%:8000/v1 -catalog.RegionOne.cloudformation.name = Heat CloudFormation Service +catalog.RegionOne.cloudformation.name = CloudFormation service catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s -catalog.RegionOne.orchestration.name = Heat Service +catalog.RegionOne.orchestration.name = Orchestration Service catalog.RegionOne.metering.publicURL = http://%SERVICE_HOST%:8777/v1 catalog.RegionOne.metering.adminURL = http://%SERVICE_HOST%:8777/v1 From d5d4974cb72880799d7ec736237ca01eacb2f6da Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Thu, 6 Feb 2014 16:00:08 +0100 Subject: [PATCH 0435/4438] Cleanup cinder-rootwrap support cinder_rootwrap support in devstack handled a number of now-abandoned use cases: - no $CINDER_DIR/etc/cinder/rootwrap.d (old-style rootwrap) - using oslo-rootwrap instead of cinder-rootwrap (abandoned experiment) This change removes unused code paths and aligns configure_cinder_rootwrap() with configure_nova_rootwrap(). 
Change-Id: I387808dae0e064cc9c894c74ab78e86124f08dd2 --- lib/cinder | 53 +++++++++++++++++++---------------------------------- 1 file changed, 19 insertions(+), 34 deletions(-) diff --git a/lib/cinder b/lib/cinder index 9f70b2a0c9..75e9c97e80 100644 --- a/lib/cinder +++ b/lib/cinder @@ -170,43 +170,28 @@ function cleanup_cinder() { function configure_cinder_rootwrap() { # Set the paths of certain binaries CINDER_ROOTWRAP=$(get_rootwrap_location cinder) - if [[ ! -x $CINDER_ROOTWRAP ]]; then - CINDER_ROOTWRAP=$(get_rootwrap_location oslo) - if [[ ! -x $CINDER_ROOTWRAP ]]; then - die $LINENO "No suitable rootwrap found." - fi - fi - # If Cinder ships the new rootwrap filters files, deploy them - # (owned by root) and add a parameter to $CINDER_ROOTWRAP - ROOTWRAP_CINDER_SUDOER_CMD="$CINDER_ROOTWRAP" - if [[ -d $CINDER_DIR/etc/cinder/rootwrap.d ]]; then - # Wipe any existing rootwrap.d files first - if [[ -d $CINDER_CONF_DIR/rootwrap.d ]]; then - sudo rm -rf $CINDER_CONF_DIR/rootwrap.d - fi - # Deploy filters to /etc/cinder/rootwrap.d - sudo mkdir -m 755 $CINDER_CONF_DIR/rootwrap.d - sudo cp $CINDER_DIR/etc/cinder/rootwrap.d/*.filters $CINDER_CONF_DIR/rootwrap.d - sudo chown -R root:root $CINDER_CONF_DIR/rootwrap.d - sudo chmod 644 $CINDER_CONF_DIR/rootwrap.d/* - # Set up rootwrap.conf, pointing to /etc/cinder/rootwrap.d - if [[ -f $CINDER_DIR/etc/cinder/rootwrap.conf ]]; then - sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/ - else - # rootwrap.conf is no longer shipped in Cinder itself - echo "filters_path=" | sudo tee $CINDER_CONF_DIR/rootwrap.conf > /dev/null - fi - sudo sed -e "s:^filters_path=.*$:filters_path=$CINDER_CONF_DIR/rootwrap.d:" -i $CINDER_CONF_DIR/rootwrap.conf - sudo chown root:root $CINDER_CONF_DIR/rootwrap.conf - sudo chmod 0644 $CINDER_CONF_DIR/rootwrap.conf - # Specify rootwrap.conf as first parameter to rootwrap - CINDER_ROOTWRAP="$CINDER_ROOTWRAP $CINDER_CONF_DIR/rootwrap.conf" - ROOTWRAP_CINDER_SUDOER_CMD="$CINDER_ROOTWRAP *" + 
# Deploy new rootwrap filters files (owned by root). + # Wipe any existing rootwrap.d files first + if [[ -d $CINDER_CONF_DIR/rootwrap.d ]]; then + sudo rm -rf $CINDER_CONF_DIR/rootwrap.d fi - + # Deploy filters to /etc/cinder/rootwrap.d + sudo mkdir -m 755 $CINDER_CONF_DIR/rootwrap.d + sudo cp $CINDER_DIR/etc/cinder/rootwrap.d/*.filters $CINDER_CONF_DIR/rootwrap.d + sudo chown -R root:root $CINDER_CONF_DIR/rootwrap.d + sudo chmod 644 $CINDER_CONF_DIR/rootwrap.d/* + # Set up rootwrap.conf, pointing to /etc/cinder/rootwrap.d + sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/ + sudo sed -e "s:^filters_path=.*$:filters_path=$CINDER_CONF_DIR/rootwrap.d:" -i $CINDER_CONF_DIR/rootwrap.conf + sudo chown root:root $CINDER_CONF_DIR/rootwrap.conf + sudo chmod 0644 $CINDER_CONF_DIR/rootwrap.conf + # Specify rootwrap.conf as first parameter to rootwrap + ROOTWRAP_CSUDOER_CMD="$CINDER_ROOTWRAP $CINDER_CONF_DIR/rootwrap.conf *" + + # Set up the rootwrap sudoers for cinder TEMPFILE=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CINDER_SUDOER_CMD" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CSUDOER_CMD" >$TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap From 6c9430e5679c36ecdc827184cf160297458c4a3c Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Thu, 6 Feb 2014 17:06:00 +0000 Subject: [PATCH 0436/4438] Set uri_v3 in tempest config This properly sets the v3 uri for keystone in the tempest config. Previously tempest would just guess the v3 uri by replacing v2 with v3. However, moving forward this will no longer be the case so devstack should properly set this uri to enable tempest to use the keystone v3 api in addition to the v2. 
Change-Id: Ib02b2e9f24d8ca1f381186c48747ca0fbc45f3f1 --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 06183b107b..1eea9b6bb4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -251,6 +251,7 @@ function configure_tempest() { # Identity iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/" + iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v3/" iniset $TEMPEST_CONFIG identity password "$password" iniset $TEMPEST_CONFIG identity alt_username $ALT_USERNAME iniset $TEMPEST_CONFIG identity alt_password "$password" From e4fa72132228688d2fe74dd974fe04b0fe4c3d6b Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 15 Jan 2014 15:04:49 -0600 Subject: [PATCH 0437/4438] Begin is_service_enabled() cleanup This converts the special cases in the is_service_enabled() function to call individual functions declared by the projects. This allows projects that are not in the DevStack repo and called via the extras.d plugin to handle an equivalent service alias. 
* Ceilometer * Cinder * Glance * Neutron * Nova * Swift TODO: remove the tests from is_service_enabled() after a transition period Patch Set 2: Rebased Change-Id: Ic78be433f93a9dd5f46be548bdbd4c984e0da6e7 --- clean.sh | 2 +- exercises/boot_from_volume.sh | 8 +++----- exercises/euca.sh | 5 ----- exercises/floating_ips.sh | 8 +++----- exercises/volumes.sh | 8 +++----- functions | 10 ++++++++++ lib/ceilometer | 9 ++++++++- lib/cinder | 8 ++++++++ lib/glance | 7 +++++++ lib/neutron | 7 +++++++ lib/nova | 14 ++++++++++++++ lib/swift | 7 +++++++ lib/template | 8 ++++++++ stack.sh | 2 +- stackrc | 2 +- unstack.sh | 2 +- 16 files changed, 82 insertions(+), 25 deletions(-) diff --git a/clean.sh b/clean.sh index e16bdb7f36..09f08dc8c2 100755 --- a/clean.sh +++ b/clean.sh @@ -97,7 +97,7 @@ if is_service_enabled ldap; then fi # Do the hypervisor cleanup until this can be moved back into lib/nova -if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then +if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then cleanup_nova_hypervisor fi diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index ed8ba6310e..79120460b8 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -30,14 +30,12 @@ TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions source $TOP_DIR/functions +# Import project functions +source $TOP_DIR/lib/cinder + # Import configuration source $TOP_DIR/openrc -# Import neutron functions if needed -if is_service_enabled neutron; then - source $TOP_DIR/lib/neutron -fi - # Import exercise configuration source $TOP_DIR/exerciserc diff --git a/exercises/euca.sh b/exercises/euca.sh index 51b2644458..ad852a4f79 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -33,11 +33,6 @@ source $TOP_DIR/functions # Import EC2 configuration source $TOP_DIR/eucarc -# Import neutron functions if needed -if is_service_enabled neutron; then - source $TOP_DIR/lib/neutron -fi - # Import exercise 
configuration source $TOP_DIR/exerciserc diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 4ca90a5c35..b981aa8294 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -27,14 +27,12 @@ TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions source $TOP_DIR/functions +# Import project functions +source $TOP_DIR/lib/neutron + # Import configuration source $TOP_DIR/openrc -# Import neutron functions if needed -if is_service_enabled neutron; then - source $TOP_DIR/lib/neutron -fi - # Import exercise configuration source $TOP_DIR/exerciserc diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 21b5d21c04..33e24589eb 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -27,14 +27,12 @@ TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions source $TOP_DIR/functions +# Import project functions +source $TOP_DIR/lib/cinder + # Import configuration source $TOP_DIR/openrc -# Import neutron functions if needed -if is_service_enabled neutron; then - source $TOP_DIR/lib/neutron -fi - # Import exercise configuration source $TOP_DIR/exerciserc diff --git a/functions b/functions index 281b6767c5..dc3278b56d 100644 --- a/functions +++ b/functions @@ -840,6 +840,16 @@ function is_service_enabled() { services=$@ for service in ${services}; do [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 + + # Look for top-level 'enabled' function for this service + if type is_${service}_enabled >/dev/null 2>&1; then + # A function exists for this service, use it + is_${service}_enabled + return $? 
+ fi + + # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled() + # are implemented [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0 [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0 [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0 diff --git a/lib/ceilometer b/lib/ceilometer index f9c76915d5..4ca77bb72b 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -59,7 +59,14 @@ TEMPEST_SERVICES+=,ceilometer # Functions # --------- -# + +# Test if any Ceilometer services are enabled +# is_ceilometer_enabled +function is_ceilometer_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"ceilometer-" ]] && return 0 + return 1 +} + # create_ceilometer_accounts() - Set up common required ceilometer accounts create_ceilometer_accounts() { diff --git a/lib/cinder b/lib/cinder index 9f70b2a0c9..3ec0fd4f09 100644 --- a/lib/cinder +++ b/lib/cinder @@ -85,6 +85,14 @@ TEMPEST_SERVICES+=,cinder # Functions # --------- + +# Test if any Cinder services are enabled +# is_cinder_enabled +function is_cinder_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"c-" ]] && return 0 + return 1 +} + # _clean_lvm_lv removes all cinder LVM volumes # # Usage: _clean_lvm_lv $VOLUME_GROUP $VOLUME_NAME_PREFIX diff --git a/lib/glance b/lib/glance index a5cb360743..1ebeeb3b2e 100644 --- a/lib/glance +++ b/lib/glance @@ -59,6 +59,13 @@ TEMPEST_SERVICES+=,glance # Functions # --------- +# Test if any Glance services are enabled +# is_glance_enabled +function is_glance_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"g-" ]] && return 0 + return 1 +} + # cleanup_glance() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_glance() { diff --git a/lib/neutron b/lib/neutron index 81db2a74d1..5bd38bcf73 100644 --- a/lib/neutron +++ b/lib/neutron @@ -244,6 +244,13 @@ TEMPEST_SERVICES+=,neutron # Functions # --------- +# Test if any Neutron services are enabled +# is_neutron_enabled 
+function is_neutron_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0 + return 1 +} + # configure_neutron() # Set common config for all neutron server and agents. function configure_neutron() { diff --git a/lib/nova b/lib/nova index dbc5c3db44..c6d99367c2 100644 --- a/lib/nova +++ b/lib/nova @@ -129,6 +129,20 @@ TEMPEST_SERVICES+=,nova # Functions # --------- +# Test if any Nova services are enabled +# is_nova_enabled +function is_nova_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"n-" ]] && return 0 + return 1 +} + +# Test if any Nova Cell services are enabled +# is_nova_enabled +function is_n-cell_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"n-cell-" ]] && return 0 + return 1 +} + # Helper to clean iptables rules function clean_iptables() { # Delete rules diff --git a/lib/swift b/lib/swift index 28ca8a80df..197c01b63c 100644 --- a/lib/swift +++ b/lib/swift @@ -118,6 +118,13 @@ TEMPEST_SERVICES+=,swift # Functions # --------- +# Test if any Swift services are enabled +# is_swift_enabled +function is_swift_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"s-" ]] && return 0 + return 1 +} + # cleanup_swift() - Remove residual data files function cleanup_swift() { rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} diff --git a/lib/template b/lib/template index 629e110271..b8e7c4d86f 100644 --- a/lib/template +++ b/lib/template @@ -10,6 +10,7 @@ # ``stack.sh`` calls the entry points in this order: # +# - is_XXXX_enabled # - install_XXXX # - configure_XXXX # - init_XXXX @@ -35,6 +36,13 @@ XXX_CONF_DIR=/etc/XXXX # Entry Points # ------------ +# Test if any XXXX services are enabled +# is_XXXX_enabled +function is_XXXX_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"XX-" ]] && return 0 + return 1 +} + # cleanup_XXXX() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_XXXX() { diff --git a/stack.sh b/stack.sh index 15e14303cf..d379d51c6e 100755 --- a/stack.sh +++ b/stack.sh @@ 
-1096,7 +1096,7 @@ if is_service_enabled s-proxy; then fi # Launch the Glance services -if is_service_enabled g-api g-reg; then +if is_service_enabled glance; then echo_summary "Starting Glance" start_glance fi diff --git a/stackrc b/stackrc index e89d25e4ab..2527b0ad84 100644 --- a/stackrc +++ b/stackrc @@ -35,7 +35,7 @@ fi # enable_service neutron # # Optional, to enable tempest configuration as part of devstack # enable_service tempest -ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,mysql +ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,mysql # Tell Tempest which services are available. The default is set here as # Tempest falls late in the configuration sequence. This differs from diff --git a/unstack.sh b/unstack.sh index 92d0642c38..c233f93e6b 100755 --- a/unstack.sh +++ b/unstack.sh @@ -104,7 +104,7 @@ if is_service_enabled nova; then stop_nova fi -if is_service_enabled g-api g-reg; then +if is_service_enabled glance; then stop_glance fi From dd710b4f12bb09abdc0dfa4a5f5c4aba81eba650 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Fri, 7 Feb 2014 16:46:17 +0000 Subject: [PATCH 0438/4438] Make neutron tempest run with tenant isolation by default This commit removes the workaround that switched tempest tenant isolation to false if Neutron was enabled. Recent changes to both neutron and tempest should make this safe finally. 
Change-Id: I929fcc73a7ef9a10f01af422ff62f9d451d52ae3 --- lib/tempest | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/tempest b/lib/tempest index 06183b107b..0fc0de26c8 100644 --- a/lib/tempest +++ b/lib/tempest @@ -266,11 +266,6 @@ function configure_tempest() { # Compute iniset $TEMPEST_CONFIG compute change_password_available False - # Note(nati) current tempest don't create network for each tenant - # so reuse same tenant for now - if is_service_enabled neutron; then - TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-False} - fi iniset $TEMPEST_CONFIG compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED iniset $TEMPEST_CONFIG compute network_for_ssh $PRIVATE_NETWORK_NAME From 67db4a9bd5c0d0a119c244e8dbb1a0a1990944b8 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 7 Feb 2014 16:02:37 -0500 Subject: [PATCH 0439/4438] remove database init from horizon horizon default config no longer uses a session database, it uses signed cookies instead, so we can stop doing db initialization and cleanup (which based on errexit we weren't doing correctly anyway). Change-Id: Icae4318e2784486db2888cbf353e95ac9a5d7cba --- lib/horizon | 9 --------- 1 file changed, 9 deletions(-) diff --git a/lib/horizon b/lib/horizon index c64d8502ba..2f5795d1ca 100644 --- a/lib/horizon +++ b/lib/horizon @@ -81,9 +81,6 @@ function configure_horizon() { # init_horizon() - Initialize databases, etc. function init_horizon() { - # Remove stale session database. - rm -f $HORIZON_DIR/openstack_dashboard/local/dashboard_openstack.sqlite3 - # ``local_settings.py`` is used to override horizon default settings. 
local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py cp $HORIZON_SETTINGS $local_settings @@ -106,12 +103,6 @@ function init_horizon() { _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_vpn True fi - # Initialize the horizon database (it stores sessions and notices shown to - # users). The user system is external (keystone). - cd $HORIZON_DIR - python manage.py syncdb --noinput - cd $TOP_DIR - # Create an empty directory that apache uses as docroot sudo mkdir -p $HORIZON_DIR/.blackhole From 5ed43bf82ac9eeb30ca543bcf695f9d45ddf77f5 Mon Sep 17 00:00:00 2001 From: Shane Wang Date: Fri, 7 Feb 2014 11:01:43 +0800 Subject: [PATCH 0440/4438] Fix misspellings in devstack Fix misspellings detected by: * pip install misspellings * git ls-files | grep -v locale | misspellings -f - Change-Id: I19726438d15cd27b813504aac530e7e53c4def12 Closes-Bug: #1257295 --- tools/xen/xenrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/xenrc b/tools/xen/xenrc index cd282341cb..b355a10d4f 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -35,7 +35,7 @@ XEN_INT_BRIDGE_OR_NET_NAME="OpenStack VM Integration Network" GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} # Extracted variables for OpenStack VM network device numbers. -# Make sure, they form a continous sequence starting from 0 +# Make sure they form a continuous sequence starting from 0 MGT_DEV_NR=0 VM_DEV_NR=1 PUB_DEV_NR=2 From d1cd0c66487cc00fa50c6638fb233e04b023d744 Mon Sep 17 00:00:00 2001 From: Daniel Kuffner Date: Sat, 8 Feb 2014 12:35:48 +0100 Subject: [PATCH 0441/4438] Docker install script fails to install docker registry The tools/docker/install_docker.sh script fails during the installation/setup of the docker registry. The problem is that the used environment variables are wrong. 
REGISTRY_IMAGE > DOCKER_REGISTRY_IMAGE REGISTRY_IMAGE_NAME > DOCKER_REGISTRY_IMAGE_NAME Change-Id: I16f051abe5c426f295c69d518b49c1b9a7b4cc94 --- tools/docker/install_docker.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh index 4fa23864fb..b9e1b242dd 100755 --- a/tools/docker/install_docker.sh +++ b/tools/docker/install_docker.sh @@ -60,5 +60,5 @@ docker pull $DOCKER_IMAGE docker tag $DOCKER_IMAGE $DOCKER_IMAGE_NAME # Get docker-registry image -docker pull $REGISTRY_IMAGE -docker tag $REGISTRY_IMAGE $REGISTRY_IMAGE_NAME +docker pull $DOCKER_REGISTRY_IMAGE +docker tag $DOCKER_REGISTRY_IMAGE $DOCKER_REGISTRY_IMAGE_NAME From d73af8787280002321ab52a3262a2d0b5a8e54cd Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 6 Feb 2014 15:33:52 -0800 Subject: [PATCH 0442/4438] If n-api-meta is being run, remove from NOVA_ENABLED_APIS If running n-api-meta as a separate service we shouldn't run it inside of n-api. This patch is in support of Iddd44f7ee43b9287a788dea49eaa484316f8da04 Change-Id: I8a54cf13dc6083b78e89c9ea5413d9e4d8d4b37a Related-Bug: #1270845 --- lib/nova | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/nova b/lib/nova index dbc5c3db44..3ee28faaec 100644 --- a/lib/nova +++ b/lib/nova @@ -389,6 +389,10 @@ function create_nova_conf() { fi if is_service_enabled n-api; then + if is_service_enabled n-api-meta; then + # If running n-api-meta as a separate service + NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//") + fi iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS" if is_service_enabled tls-proxy; then # Set the service port for a proxy to take the original From 9e032c2d374f80612c010775dd8d71389d5d09a3 Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Mon, 10 Feb 2014 11:36:25 +0100 Subject: [PATCH 0443/4438] read_password needs to store in .localrc.auto if local.conf is used. 
when running stack.sh with no passwords in local.conf read_password() creates localrc and local.conf is ignored Change-Id: I25ad07569d2b42b190449591d5a01ade8022392c --- stack.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 1a1460d2f3..303541d63e 100755 --- a/stack.sh +++ b/stack.sh @@ -362,7 +362,11 @@ function read_password { var=$1; msg=$2 pw=${!var} - localrc=$TOP_DIR/localrc + if [[ -f $RC_DIR/localrc ]]; then + localrc=$TOP_DIR/localrc + else + localrc=$TOP_DIR/.localrc.auto + fi # If the password is not defined yet, proceed to prompt user for a password. if [ ! $pw ]; then From 6b1cb10809ae4c2cc9a4b39e0298458f0ecd4853 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Mon, 10 Feb 2014 09:59:43 -0800 Subject: [PATCH 0444/4438] Add cliff, pycadf, stevedore, & taskflow from oslo Oslo has adopted 4 libraries that were previously on stackforge, so we can now install them from source. Change-Id: I6b6e20a7884b47ade466fc38641a5ac1a5f3e146 --- lib/oslo | 16 ++++++++++++++++ stackrc | 16 ++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/lib/oslo b/lib/oslo index f644ed76c3..b089842ae4 100644 --- a/lib/oslo +++ b/lib/oslo @@ -20,9 +20,13 @@ set +o xtrace # Defaults # -------- +CLIFF_DIR=$DEST/cliff OSLOCFG_DIR=$DEST/oslo.config OSLOMSG_DIR=$DEST/oslo.messaging OSLORWRAP_DIR=$DEST/oslo.rootwrap +PYCADF_DIR=$DEST/pycadf +STEVEDORE_DIR=$DEST/stevedore +TASKFLOW_DIR=$DEST/taskflow # Entry Points # ------------ @@ -33,6 +37,9 @@ function install_oslo() { # for a smoother transition of existing users. 
cleanup_oslo + git_clone $CLIFF_REPO $CLIFF_DIR $CLIFF_BRANCH + setup_develop $CLIFF_DIR + git_clone $OSLOCFG_REPO $OSLOCFG_DIR $OSLOCFG_BRANCH setup_develop $OSLOCFG_DIR @@ -41,6 +48,15 @@ function install_oslo() { git_clone $OSLORWRAP_REPO $OSLORWRAP_DIR $OSLORWRAP_BRANCH setup_develop $OSLORWRAP_DIR + + git_clone $PYCADF_REPO $PYCADF_DIR $PYCADF_BRANCH + setup_develop $PYCADF_DIR + + git_clone $STEVEDORE_REPO $STEVEDORE_DIR $STEVEDORE_BRANCH + setup_develop $STEVEDORE_DIR + + git_clone $TASKFLOW_REPO $TASKFLOW_DIR $TASKFLOW_BRANCH + setup_develop $TASKFLOW_DIR } # cleanup_oslo() - purge possibly old versions of oslo diff --git a/stackrc b/stackrc index 7eed60cb2c..729c2f5b40 100644 --- a/stackrc +++ b/stackrc @@ -140,6 +140,10 @@ NOVACLIENT_BRANCH=${NOVACLIENT_BRANCH:-master} OPENSTACKCLIENT_REPO=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git} OPENSTACKCLIENT_BRANCH=${OPENSTACKCLIENT_BRANCH:-master} +# cliff command line framework +CLIFF_REPO=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git} +CLIFF_BRANCH=${CLIFF_BRANCH:-master} + # oslo.config OSLOCFG_REPO=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git} OSLOCFG_BRANCH=${OSLOCFG_BRANCH:-master} @@ -152,6 +156,18 @@ OSLOMSG_BRANCH=${OSLOMSG_BRANCH:-master} OSLORWRAP_REPO=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git} OSLORWRAP_BRANCH=${OSLORWRAP_BRANCH:-master} +# pycadf auditing library +PYCADF_REPO=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git} +PYCADF_BRANCH=${PYCADF_BRANCH:-master} + +# stevedore plugin manager +STEVEDORE_REPO=${STEVEDORE_REPO:-${GIT_BASE}/openstack/stevedore.git} +STEVEDORE_BRANCH=${STEVEDORE_BRANCH:-master} + +# taskflow plugin manager +TASKFLOW_REPO=${TASKFLOW_REPO:-${GIT_BASE}/openstack/taskflow.git} +TASKFLOW_BRANCH=${TASKFLOW_BRANCH:-master} + # pbr drives the setuptools configs PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git} PBR_BRANCH=${PBR_BRANCH:-master} From 9972ec23c43cea1be6ee5174e72c06e32f295212 Mon Sep 17 00:00:00 2001 From: 
Malini Kamalambal Date: Mon, 10 Feb 2014 11:22:39 -0500 Subject: [PATCH 0445/4438] Add marconi to enabled services This patch adds marconi to enabled services. This is needed to run the tempest experimental job for marconi. Change-Id: I28794c3acacc6daa9f698f8031b58d1ee13c3bad Implements: blueprint add-basic-marconi-tests --- lib/marconi | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/marconi b/lib/marconi index 1eaebbdf16..d1ab5f3a5c 100644 --- a/lib/marconi +++ b/lib/marconi @@ -58,6 +58,13 @@ TEMPEST_SERVICES+=,marconi # Functions # --------- +# Test if any Marconi services are enabled +# is_marconi_enabled +function is_marconi_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"marconi-" ]] && return 0 + return 1 +} + # cleanup_marconi() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_marconi() { From ae90f67e05a93e7b69cd019f6c50fa20405edb68 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Mon, 10 Feb 2014 14:23:54 -0500 Subject: [PATCH 0446/4438] Stop catting tempest.config during tempest setup This commit removes the 'cat tempest.config' from lib/tempest. There is no reason to cat it as part of running devstack because the file is and can be interacted with after devstack finishes running. To prevent a loss of information in the gate this change should be coupled with devstack-gate change: Ifb36918cd0d686cb3865f5322cd62c209acaaf30 which copies the tempest.config file with the other test artifacts. 
Change-Id: Ia01cd53660b3490ea9faa9e9c746bafd9df12a9b --- lib/tempest | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index 76da17062c..c8eebfcf05 100644 --- a/lib/tempest +++ b/lib/tempest @@ -348,9 +348,6 @@ function configure_tempest() { fi done - echo "Created tempest configuration file:" - cat $TEMPEST_CONFIG - # Restore IFS IFS=$ifs #Restore errexit From bc76f748ebfc57f5af3e006f4092ae574b8febfe Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 10 Feb 2014 21:11:04 -0500 Subject: [PATCH 0447/4438] remove build_tempest we haven't actually used this script in about a year, in favor of the actual in tree lib/tempest. Change-Id: I9d78b395846ebe833a38ba50edae226040cd7f45 --- tools/build_tempest.sh | 53 ------------------------------------------ 1 file changed, 53 deletions(-) delete mode 100755 tools/build_tempest.sh diff --git a/tools/build_tempest.sh b/tools/build_tempest.sh deleted file mode 100755 index 6c527f5962..0000000000 --- a/tools/build_tempest.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash -# -# **build_tempest.sh** - -# Checkout and prepare a Tempest repo: git://git.openstack.org/openstack/tempest.git - -function usage { - echo "$0 - Check out and prepare a Tempest repo" - echo "" - echo "Usage: $0" - exit 1 -} - -if [ "$1" = "-h" ]; then - usage -fi - -# Clean up any resources that may be in use -cleanup() { - set +o errexit - - # Kill ourselves to signal any calling process - trap 2; kill -2 $$ -} - -trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $TOOLS_DIR/..; pwd) - -# Import common functions -. $TOP_DIR/functions - -# Abort if localrc is not set -if [ ! -e $TOP_DIR/localrc ]; then - echo "You must have a localrc with ALL necessary passwords and configuration defined before proceeding." - echo "See stack.sh for required passwords." 
- exit 1 -fi - -# Source params -source ./stackrc - -# Where Openstack code lives -DEST=${DEST:-/opt/stack} - -TEMPEST_DIR=$DEST/tempest - -# Install tests and prerequisites -git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH - -trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT From 0b9776d2f34197d1e920e1dc8506b8f8c31452ca Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Tue, 28 Jan 2014 11:20:53 -0500 Subject: [PATCH 0448/4438] Install glance images before starting Nova The docker driver for Nova needs a registry service to be running. It is being run inside a container using an image -- that image must be downloaded. The registry service must be started via nova_plugins/hypervisor-docker, but this is presently called before Glance's image download. The reordering is being done such that Glance may download the registry image, but prior to starting Nova such that "hypervisor-docker" may have an image downloaded and available to launch the registry. This change should cause no negative effects on other hypervisors. Change-Id: I7bccb42517e4c6187f2a90c64f39cda4577f89a3 blueprint: docker-glance-uploads --- stack.sh | 82 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/stack.sh b/stack.sh index 303541d63e..78cfbc58ce 100755 --- a/stack.sh +++ b/stack.sh @@ -1090,6 +1090,47 @@ if is_service_enabled g-api g-reg; then start_glance fi +# Install Images +# ============== + +# Upload an image to glance. +# +# The default image is cirros, a small testing image which lets you login as **root** +# cirros has a ``cloud-init`` analog supporting login via keypair and sending +# scripts as userdata. +# See https://help.ubuntu.com/community/CloudInit for more on cloud-init +# +# Override ``IMAGE_URLS`` with a comma-separated list of UEC images. 
+# * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz + +if is_service_enabled g-reg; then + TOKEN=$(keystone token-get | grep ' id ' | get_field 2) + die_if_not_set $LINENO TOKEN "Keystone fail to get token" + + if is_baremetal; then + echo_summary "Creating and uploading baremetal images" + + # build and upload separate deploy kernel & ramdisk + upload_baremetal_deploy $TOKEN + + # upload images, separating out the kernel & ramdisk for PXE boot + for image_url in ${IMAGE_URLS//,/ }; do + upload_baremetal_image $image_url $TOKEN + done + else + echo_summary "Uploading images" + + # Option to upload legacy ami-tty, which works with xenserver + if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then + IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz" + fi + + for image_url in ${IMAGE_URLS//,/ }; do + upload_image $image_url $TOKEN + done + fi +fi + # Create an access key and secret key for nova ec2 register image if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1) @@ -1195,47 +1236,6 @@ if is_service_enabled nova && is_service_enabled key; then fi -# Install Images -# ============== - -# Upload an image to glance. -# -# The default image is cirros, a small testing image which lets you login as **root** -# cirros has a ``cloud-init`` analog supporting login via keypair and sending -# scripts as userdata. -# See https://help.ubuntu.com/community/CloudInit for more on cloud-init -# -# Override ``IMAGE_URLS`` with a comma-separated list of UEC images. 
-# * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz - -if is_service_enabled g-reg; then - TOKEN=$(keystone token-get | grep ' id ' | get_field 2) - die_if_not_set $LINENO TOKEN "Keystone fail to get token" - - if is_baremetal; then - echo_summary "Creating and uploading baremetal images" - - # build and upload separate deploy kernel & ramdisk - upload_baremetal_deploy $TOKEN - - # upload images, separating out the kernel & ramdisk for PXE boot - for image_url in ${IMAGE_URLS//,/ }; do - upload_baremetal_image $image_url $TOKEN - done - else - echo_summary "Uploading images" - - # Option to upload legacy ami-tty, which works with xenserver - if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then - IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz" - fi - - for image_url in ${IMAGE_URLS//,/ }; do - upload_image $image_url $TOKEN - done - fi -fi - # If we are running nova with baremetal driver, there are a few # last-mile configuration bits to attend to, which must happen # after n-api and n-sch have started. From 97ce935a9244956fd977cd1eb62e7b429e5cb141 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Fri, 31 Jan 2014 01:40:50 -0500 Subject: [PATCH 0449/4438] Update docker driver to use a CirrOS image For purposes of matching the VM image used in devstack across hypervisors, set the default container image for Docker to cirros. This uses the CirrOS image from stackbrew, the "standard library" for Docker. 
Change-Id: I9d767a4e06c5caa7b92ffea25e6a9aeda9bf282a --- lib/nova_plugins/hypervisor-docker | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index bb934b87d6..cdd9317761 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -31,8 +31,8 @@ DOCKER_UNIX_SOCKET=/var/run/docker.sock DOCKER_PID_FILE=/var/run/docker.pid DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042} -DOCKER_IMAGE=${DOCKER_IMAGE:-busybox:latest} -DOCKER_IMAGE_NAME=busybox +DOCKER_IMAGE=${DOCKER_IMAGE:-cirros:latest} +DOCKER_IMAGE_NAME=cirros DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-registry:latest} DOCKER_REGISTRY_IMAGE_NAME=registry DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME} From d0860cc26d78c3f1c70b332ecc793442a1c8048d Mon Sep 17 00:00:00 2001 From: john-griffith Date: Thu, 23 Jan 2014 11:31:10 -0700 Subject: [PATCH 0450/4438] Replace custom cinder driver configs The devstack/lib/cinder file has a number of third party driver config options hard-coded in it. Rather than add yet another if driver== statement here let's use plugin files and do something similar to what's already in place for nova_hypervisors and neutron plugins. This works the same way folks were implementing their drivers already, the key is to use a CINDER_DRIVER variable in your localrc file that matches the name of the lib/cinder_plugin file to use. The existing third party driver entries that were in lib/cinder have been migrated to cooresponding plugin files. 
Change-Id: I4ee51ea542d5aa63879afd5297311a9df727c57f --- lib/cinder | 44 ++++++------------------------- lib/cinder_plugins/XenAPINFS | 44 +++++++++++++++++++++++++++++++ lib/cinder_plugins/glusterfs | 50 ++++++++++++++++++++++++++++++++++++ lib/cinder_plugins/nfs | 42 ++++++++++++++++++++++++++++++ lib/cinder_plugins/sheepdog | 39 ++++++++++++++++++++++++++++ lib/cinder_plugins/solidfire | 48 ++++++++++++++++++++++++++++++++++ lib/cinder_plugins/vsphere | 42 ++++++++++++++++++++++++++++++ 7 files changed, 273 insertions(+), 36 deletions(-) create mode 100644 lib/cinder_plugins/XenAPINFS create mode 100644 lib/cinder_plugins/glusterfs create mode 100644 lib/cinder_plugins/nfs create mode 100644 lib/cinder_plugins/sheepdog create mode 100644 lib/cinder_plugins/solidfire create mode 100644 lib/cinder_plugins/vsphere diff --git a/lib/cinder b/lib/cinder index 9f70b2a0c9..51eb3c1262 100644 --- a/lib/cinder +++ b/lib/cinder @@ -27,6 +27,12 @@ set +o xtrace # set up default driver CINDER_DRIVER=${CINDER_DRIVER:-default} +CINDER_PLUGINS=$TOP_DIR/lib/cinder_plugins + +# grab plugin config if specified via cinder_driver +if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then + source $CINDER_PLUGINS/$CINDER_DRIVER +fi # set up default directories CINDER_DIR=$DEST/cinder @@ -300,42 +306,8 @@ function configure_cinder() { setup_colorized_logging $CINDER_CONF DEFAULT "project_id" "user_id" fi - if [ "$CINDER_DRIVER" == "XenAPINFS" ]; then - ( - set -u - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver" - iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" - iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" - iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD" - iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" - iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" - ) - elif [ 
"$CINDER_DRIVER" == "nfs" ]; then - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.nfs.NfsDriver" - iniset $CINDER_CONF DEFAULT nfs_shares_config "$CINDER_CONF_DIR/nfs_shares.conf" - echo "$CINDER_NFS_SERVERPATH" | sudo tee "$CINDER_CONF_DIR/nfs_shares.conf" - sudo chmod 666 $CINDER_CONF_DIR/nfs_shares.conf - elif [ "$CINDER_DRIVER" == "sheepdog" ]; then - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" - elif [ "$CINDER_DRIVER" == "glusterfs" ]; then - # To use glusterfs, set the following in localrc: - # CINDER_DRIVER=glusterfs - # CINDER_GLUSTERFS_SHARES="127.0.0.1:/vol1;127.0.0.1:/vol2" - # Shares are : and separated by semicolons. - - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.glusterfs.GlusterfsDriver" - iniset $CINDER_CONF DEFAULT glusterfs_shares_config "$CINDER_CONF_DIR/glusterfs_shares" - touch $CINDER_CONF_DIR/glusterfs_shares - if [ ! -z "$CINDER_GLUSTERFS_SHARES" ]; then - CINDER_GLUSTERFS_SHARES=$(echo $CINDER_GLUSTERFS_SHARES | tr ";" "\n") - echo "$CINDER_GLUSTERFS_SHARES" > $CINDER_CONF_DIR/glusterfs_shares - fi - elif [ "$CINDER_DRIVER" == "vsphere" ]; then - echo_summary "Using VMware vCenter driver" - iniset $CINDER_CONF DEFAULT vmware_host_ip "$VMWAREAPI_IP" - iniset $CINDER_CONF DEFAULT vmware_host_username "$VMWAREAPI_USER" - iniset $CINDER_CONF DEFAULT vmware_host_password "$VMWAREAPI_PASSWORD" - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver" + if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then + configure_cinder_driver fi if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then diff --git a/lib/cinder_plugins/XenAPINFS b/lib/cinder_plugins/XenAPINFS new file mode 100644 index 0000000000..72e1c1398c --- /dev/null +++ b/lib/cinder_plugins/XenAPINFS @@ -0,0 +1,44 @@ +# lib/cinder_plugins/XenAPINFS +# Configure the XenAPINFS driver + +# Enable with: +# +# CINDER_DRIVER=XenAPINFS + +# Dependencies: +# +# - ``functions`` file +# 
- ``cinder`` configurations + +# configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver() { + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver" + iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" + iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" + iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD" + iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" + iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" +} + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/glusterfs b/lib/cinder_plugins/glusterfs new file mode 100644 index 0000000000..a0c5ae8d5e --- /dev/null +++ b/lib/cinder_plugins/glusterfs @@ -0,0 +1,50 @@ +# lib/cinder_plugins/glusterfs +# Configure the glusterfs driver + +# Enable with: +# +# CINDER_DRIVER=glusterfs + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver() { + # To use glusterfs, set the following in localrc: + # CINDER_DRIVER=glusterfs + # CINDER_GLUSTERFS_SHARES="127.0.0.1:/vol1;127.0.0.1:/vol2" + # Shares are : and separated by semicolons. 
+ + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.glusterfs.GlusterfsDriver" + iniset $CINDER_CONF DEFAULT glusterfs_shares_config "$CINDER_CONF_DIR/glusterfs_shares" + touch $CINDER_CONF_DIR/glusterfs_shares + if [ ! -z "$CINDER_GLUSTERFS_SHARES" ]; then + CINDER_GLUSTERFS_SHARES=$(echo $CINDER_GLUSTERFS_SHARES | tr ";" "\n") + echo "$CINDER_GLUSTERFS_SHARES" > $CINDER_CONF_DIR/glusterfs_shares + fi +} + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/nfs b/lib/cinder_plugins/nfs new file mode 100644 index 0000000000..ea2c9ce665 --- /dev/null +++ b/lib/cinder_plugins/nfs @@ -0,0 +1,42 @@ +# lib/cinder_plugins/nfs +# Configure the nfs driver + +# Enable with: +# +# CINDER_DRIVER=nfs + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver() { + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.nfs.NfsDriver" + iniset $CINDER_CONF DEFAULT nfs_shares_config "$CINDER_CONF_DIR/nfs_shares.conf" + echo "$CINDER_NFS_SERVERPATH" | sudo tee "$CINDER_CONF_DIR/nfs_shares.conf" + sudo chmod 660 $CINDER_CONF_DIR/nfs_shares.conf +} + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/sheepdog b/lib/cinder_plugins/sheepdog new file mode 100644 index 0000000000..4435932371 --- /dev/null +++ b/lib/cinder_plugins/sheepdog @@ -0,0 +1,39 @@ +# lib/cinder_plugins/sheepdog +# Configure the sheepdog driver + +# Enable with: +# +# CINDER_DRIVER=sheepdog + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# 
configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver() { + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" +} + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/solidfire b/lib/cinder_plugins/solidfire new file mode 100644 index 0000000000..47c113e1a2 --- /dev/null +++ b/lib/cinder_plugins/solidfire @@ -0,0 +1,48 @@ +# lib/cinder_plugins/solidfire +# Configure the solidfire driver + +# Enable with: +# +# CINDER_DRIVER=solidfire + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver() { + # To use solidfire, set the following in localrc: + # CINDER_DRIVER=solidfire + # SAN_IP= + # SAN_LOGIN= + # SAN_PASSWORD= + + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.solidfire.SolidFireDriver" + iniset $CINDER_CONF DEFAULT san_ip $SAN_IP + iniset $CINDER_CONF DEFAULT san_login $SAN_LOGIN + iniset $CINDER_CONF DEFAULT san_password $SAN_PASSWORD +} + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/vsphere b/lib/cinder_plugins/vsphere new file mode 100644 index 0000000000..c8cab6a8c1 --- /dev/null +++ b/lib/cinder_plugins/vsphere @@ -0,0 +1,42 @@ +# lib/cinder_plugins/vsphere +# Configure the vsphere 
driver + +# Enable with: +# +# CINDER_DRIVER=vsphere + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver() { + iniset $CINDER_CONF DEFAULT vmware_host_ip "$VMWAREAPI_IP" + iniset $CINDER_CONF DEFAULT vmware_host_username "$VMWAREAPI_USER" + iniset $CINDER_CONF DEFAULT vmware_host_password "$VMWAREAPI_PASSWORD" + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver" +} + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: From 062cdaf84c11fbbef71cab1db833c4aac3baadbf Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 10 Feb 2014 22:24:49 -0500 Subject: [PATCH 0451/4438] add dstat to see top process info pidstat data isn't exceptionally useful, it's lots of lines, and seems to be missing some of the most critical one. dstat is kind of like sysstat, except the formatting is much better. It also supports advanced features like the top CPU using process at every interval. 
put this behind the sysstat variable, as we'll probably want to replace sysstat & pidstat with this if it works Change-Id: I48dc22a0a7e63fe3abb527646cc70525998a7d85 --- files/apts/dstat | 1 + files/rpms-suse/dstat | 1 + files/rpms/dstat | 1 + stack.sh | 12 ++++++++++++ 4 files changed, 15 insertions(+) create mode 100644 files/apts/dstat create mode 100644 files/rpms-suse/dstat create mode 100644 files/rpms/dstat diff --git a/files/apts/dstat b/files/apts/dstat new file mode 100644 index 0000000000..2b643b8b1b --- /dev/null +++ b/files/apts/dstat @@ -0,0 +1 @@ +dstat diff --git a/files/rpms-suse/dstat b/files/rpms-suse/dstat new file mode 100644 index 0000000000..2b643b8b1b --- /dev/null +++ b/files/rpms-suse/dstat @@ -0,0 +1 @@ +dstat diff --git a/files/rpms/dstat b/files/rpms/dstat new file mode 100644 index 0000000000..8a8f8fe737 --- /dev/null +++ b/files/rpms/dstat @@ -0,0 +1 @@ +dstat \ No newline at end of file diff --git a/stack.sh b/stack.sh index 303541d63e..1dfd4ddbc6 100755 --- a/stack.sh +++ b/stack.sh @@ -298,6 +298,8 @@ SYSLOG_PORT=${SYSLOG_PORT:-516} SYSSTAT_FILE=${SYSSTAT_FILE:-"sysstat.dat"} SYSSTAT_INTERVAL=${SYSSTAT_INTERVAL:-"1"} +DSTAT_FILE=${DSTAT_FILE:-"dstat.txt"} + PIDSTAT_FILE=${PIDSTAT_FILE:-"pidstat.txt"} PIDSTAT_INTERVAL=${PIDSTAT_INTERVAL:-"5"} @@ -879,6 +881,16 @@ if is_service_enabled sysstat; then fi fi +if is_service_enabled dstat; then + # Per-process stats + DSTAT_OPTS="-tcndylp --top-cpu-adv" + if [[ -n ${SCREEN_LOGDIR} ]]; then + screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE" + else + screen_it dstat "dstat $DSTAT_OPTS" + fi +fi + if is_service_enabled pidstat; then # Per-process stats PIDSTAT_OPTS="-l -p ALL -T ALL" From c86ec3568c7ed11ce38584b654b91594eb0d235a Mon Sep 17 00:00:00 2001 From: Yuriy Zveryanskyy Date: Wed, 12 Feb 2014 11:03:18 +0200 Subject: [PATCH 0452/4438] Fix hook script for Ironic cleanup_ironic should not be started on "unstack" phase, otherwise API service not restarted 
because auth cache directory removed. Change-Id: I3da86b9fb8c3ce1185aff05df0fa83cf259708f4 --- extras.d/50-ironic.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/extras.d/50-ironic.sh b/extras.d/50-ironic.sh index f68a14680f..9e61dc5d78 100644 --- a/extras.d/50-ironic.sh +++ b/extras.d/50-ironic.sh @@ -28,6 +28,9 @@ if is_service_enabled ir-api ir-cond; then if [[ "$1" == "unstack" ]]; then stop_ironic + fi + + if [[ "$1" == "clean" ]]; then cleanup_ironic fi fi From 2b69f23625f988d17574d746773e2932ca109427 Mon Sep 17 00:00:00 2001 From: tanlin Date: Wed, 12 Feb 2014 16:11:32 +0800 Subject: [PATCH 0453/4438] Rename Openstack to OpenStack Change-Id: I78ac040e926ef8040ee674b6fea3223a8ab4ae99 --- openrc | 2 +- tools/create_userrc.sh | 2 +- tools/jenkins/README.md | 2 +- tools/xen/README.md | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/openrc b/openrc index 784b00e51b..fc066ad82c 100644 --- a/openrc +++ b/openrc @@ -67,7 +67,7 @@ GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} # Identity API version export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} -# Authenticating against an Openstack cloud using Keystone returns a **Token** +# Authenticating against an OpenStack cloud using Keystone returns a **Token** # and **Service Catalog**. The catalog contains the endpoints for all services # the user/tenant has access to - including nova, glance, keystone, swift, ... # We currently recommend using the 2.0 *identity api*. 
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index e2d855c4df..1c9565145b 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -199,7 +199,7 @@ export EC2_URL="$EC2_URL" export S3_URL="$S3_URL" # OpenStack USER ID = $user_id export OS_USERNAME="$user_name" -# Openstack Tenant ID = $tenant_id +# OpenStack Tenant ID = $tenant_id export OS_TENANT_NAME="$tenant_name" export OS_AUTH_URL="$OS_AUTH_URL" export OS_CACERT="$OS_CACERT" diff --git a/tools/jenkins/README.md b/tools/jenkins/README.md index 371017db1a..3586da9c66 100644 --- a/tools/jenkins/README.md +++ b/tools/jenkins/README.md @@ -1,6 +1,6 @@ Getting Started With Jenkins and Devstack ========================================= -This little corner of devstack is to show how to get an Openstack jenkins +This little corner of devstack is to show how to get an OpenStack jenkins environment up and running quickly, using the rcb configuration methodology. diff --git a/tools/xen/README.md b/tools/xen/README.md index ee1abcc091..712782bc5f 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -1,11 +1,11 @@ # Getting Started With XenServer and Devstack The purpose of the code in this directory it to help developers bootstrap a -XenServer 6.2 (older versions may also work) + Openstack development +XenServer 6.2 (older versions may also work) + OpenStack development environment. This file gives some pointers on how to get started. Xenserver is a Type 1 hypervisor, so it is best installed on bare metal. The -Openstack services are configured to run within a virtual machine (called OS +OpenStack services are configured to run within a virtual machine (called OS domU) on the XenServer host. The VM uses the XAPI toolstack to communicate with the host over a network connection (see `MGT_BRIDGE_OR_NET_NAME`). 
From 1814e671d3af0231aa18a08d3406d54332f9b4ef Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Tue, 11 Feb 2014 17:56:07 +0100 Subject: [PATCH 0454/4438] Fix bug "Invalid OpenStack Nova credentials." on the gate During the process, when create_userrc.sh tries to creates certificates and sourcable rc, it assumes that all users have the same password. Change-Id: Ifb57a43aad439ffe041e98465719a8a8eceae544 Closes-Bug: #1260723 --- lib/swift | 11 ++++++++--- tools/create_userrc.sh | 8 ++++++++ 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/lib/swift b/lib/swift index 0febb00f60..be25c81468 100644 --- a/lib/swift +++ b/lib/swift @@ -520,6 +520,11 @@ function create_swift_disk() { # swifttenanttest2 swiftusertest2 admin function create_swift_accounts() { + # Defines specific passwords used by tools/create_userrc.sh + SWIFTUSERTEST1_PASSWORD=testing + SWIFTUSERTEST2_PASSWORD=testing2 + SWIFTUSERTEST3_PASSWORD=testing3 + KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql} SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") @@ -542,17 +547,17 @@ function create_swift_accounts() { SWIFT_TENANT_TEST1=$(keystone tenant-create --name=swifttenanttest1 | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_TENANT_TEST1 "Failure creating SWIFT_TENANT_TEST1" - SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=testing --email=test@example.com | grep " id " | get_field 2) + SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=$SWIFTUSERTEST1_PASSWORD --email=test@example.com | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1" keystone user-role-add --user-id $SWIFT_USER_TEST1 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST1 - SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 --pass=testing3 --email=test3@example.com | grep " id " | get_field 2) + SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 
--pass=$SWIFTUSERTEST3_PASSWORD --email=test3@example.com | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_USER_TEST3 "Failure creating SWIFT_USER_TEST3" keystone user-role-add --user-id $SWIFT_USER_TEST3 --role-id $ANOTHER_ROLE --tenant-id $SWIFT_TENANT_TEST1 SWIFT_TENANT_TEST2=$(keystone tenant-create --name=swifttenanttest2 | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_TENANT_TEST2 "Failure creating SWIFT_TENANT_TEST2" - SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=testing2 --email=test2@example.com | grep " id " | get_field 2) + SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=$SWIFTUSERTEST2_PASSWORD --email=test2@example.com | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_USER_TEST2 "Failure creating SWIFT_USER_TEST2" keystone user-role-add --user-id $SWIFT_USER_TEST2 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST2 } diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index e2d855c4df..d9c93cc476 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -253,6 +253,14 @@ if [ $MODE != "create" ]; then if [ $MODE = one -a "$user_name" != "$USER_NAME" ]; then continue; fi + + # Checks for a specific password defined for an user. + # Example for an username johndoe: + # JOHNDOE_PASSWORD=1234 + eval SPECIFIC_UPASSWORD="\$${USER_NAME^^}_PASSWORD" + if [ -n "$SPECIFIC_UPASSWORD" ]; then + USER_PASS=$SPECIFIC_UPASSWORD + fi add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS" done done From dff49a242eef817efa23d4e808aaa6a74ac82ed0 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 30 Jan 2014 15:37:40 -0600 Subject: [PATCH 0455/4438] Split functions Move shared and non-DevStack-specific functions to `functions-common`. This is a code move only with some updated comments. The functions are now sorted alphabetically within function groups, eg. all git-related functions are grouped together. The groups are listed at the top of the file. 
'functions' sources 'functions-common' so no additional changes are required for backward-compatability. All functions shared with Grenade have also been moved. functions-common was created from commit e0ed8ea038299952826b27a16753775472f108d8 Change-Id: I73bf7134fd6a60ec1ea44a5bfab08b0569b60ded --- functions | 1422 +-------------------------------------------- functions-common | 1433 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 1439 insertions(+), 1416 deletions(-) create mode 100644 functions-common diff --git a/functions b/functions index dc3278b56d..5eae7fe510 100644 --- a/functions +++ b/functions @@ -1,563 +1,21 @@ -# functions - Common functions used by DevStack components +# functions - DevStack-specific functions # # The following variables are assumed to be defined by certain functions: # # - ``ENABLED_SERVICES`` -# - ``ERROR_ON_CLONE`` # - ``FILES`` # - ``GLANCE_HOSTPORT`` -# - ``OFFLINE`` -# - ``PIP_DOWNLOAD_CACHE`` -# - ``PIP_USE_MIRRORS`` -# - ``RECLONE`` # - ``TRACK_DEPENDS`` -# - ``http_proxy``, ``https_proxy``, ``no_proxy`` +# Include the common functions +FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) +source ${FUNC_DIR}/functions-common # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace -# Convert CIDR notation to a IPv4 netmask -# cidr2netmask cidr-bits -function cidr2netmask() { - local maskpat="255 255 255 255" - local maskdgt="254 252 248 240 224 192 128" - set -- ${maskpat:0:$(( ($1 / 8) * 4 ))}${maskdgt:$(( (7 - ($1 % 8)) * 4 )):3} - echo ${1-0}.${2-0}.${3-0}.${4-0} -} - - -# Return the network portion of the given IP address using netmask -# netmask is in the traditional dotted-quad format -# maskip ip-address netmask -function maskip() { - local ip=$1 - local mask=$2 - local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}" - local subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.})) - echo $subnet -} - - -# Exit 0 if 
address is in network or 1 if address is not in network -# ip-range is in CIDR notation: 1.2.3.4/20 -# address_in_net ip-address ip-range -function address_in_net() { - local ip=$1 - local range=$2 - local masklen=${range#*/} - local network=$(maskip ${range%/*} $(cidr2netmask $masklen)) - local subnet=$(maskip $ip $(cidr2netmask $masklen)) - [[ $network == $subnet ]] -} - - -# Wrapper for ``apt-get`` to set cache and proxy environment variables -# Uses globals ``OFFLINE``, ``*_proxy`` -# apt_get operation package [package ...] -function apt_get() { - [[ "$OFFLINE" = "True" || -z "$@" ]] && return - local sudo="sudo" - [[ "$(id -u)" = "0" ]] && sudo="env" - $sudo DEBIAN_FRONTEND=noninteractive \ - http_proxy=$http_proxy https_proxy=$https_proxy \ - no_proxy=$no_proxy \ - apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" -} - - -# Gracefully cp only if source file/dir exists -# cp_it source destination -function cp_it { - if [ -e $1 ] || [ -d $1 ]; then - cp -pRL $1 $2 - fi -} - - -# Prints backtrace info -# filename:lineno:function -function backtrace { - local level=$1 - local deep=$((${#BASH_SOURCE[@]} - 1)) - echo "[Call Trace]" - while [ $level -le $deep ]; do - echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}" - deep=$((deep - 1)) - done -} - - -# Prints line number and "message" then exits -# die $LINENO "message" -function die() { - local exitcode=$? - set +o xtrace - local line=$1; shift - if [ $exitcode == 0 ]; then - exitcode=1 - fi - backtrace 2 - err $line "$*" - exit $exitcode -} - - -# Checks an environment variable is not set or has length 0 OR if the -# exit code is non-zero and prints "message" and exits -# NOTE: env-var is the variable name without a '$' -# die_if_not_set $LINENO env-var "message" -function die_if_not_set() { - local exitcode=$? - FXTRACE=$(set +o | grep xtrace) - set +o xtrace - local line=$1; shift - local evar=$1; shift - if ! 
is_set $evar || [ $exitcode != 0 ]; then - die $line "$*" - fi - $FXTRACE -} - - -# Prints line number and "message" in error format -# err $LINENO "message" -function err() { - local exitcode=$? - errXTRACE=$(set +o | grep xtrace) - set +o xtrace - local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2" - echo $msg 1>&2; - if [[ -n ${SCREEN_LOGDIR} ]]; then - echo $msg >> "${SCREEN_LOGDIR}/error.log" - fi - $errXTRACE - return $exitcode -} - - -# Checks an environment variable is not set or has length 0 OR if the -# exit code is non-zero and prints "message" -# NOTE: env-var is the variable name without a '$' -# err_if_not_set $LINENO env-var "message" -function err_if_not_set() { - local exitcode=$? - errinsXTRACE=$(set +o | grep xtrace) - set +o xtrace - local line=$1; shift - local evar=$1; shift - if ! is_set $evar || [ $exitcode != 0 ]; then - err $line "$*" - fi - $errinsXTRACE - return $exitcode -} - - -# Prints line number and "message" in warning format -# warn $LINENO "message" -function warn() { - local exitcode=$? - errXTRACE=$(set +o | grep xtrace) - set +o xtrace - local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2" - echo $msg 1>&2; - if [[ -n ${SCREEN_LOGDIR} ]]; then - echo $msg >> "${SCREEN_LOGDIR}/error.log" - fi - $errXTRACE - return $exitcode -} - - -# HTTP and HTTPS proxy servers are supported via the usual environment variables [1] -# ``http_proxy``, ``https_proxy`` and ``no_proxy``. 
They can be set in -# ``localrc`` or on the command line if necessary:: -# -# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html -# -# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh - -function export_proxy_variables() { - if [[ -n "$http_proxy" ]]; then - export http_proxy=$http_proxy - fi - if [[ -n "$https_proxy" ]]; then - export https_proxy=$https_proxy - fi - if [[ -n "$no_proxy" ]]; then - export no_proxy=$no_proxy - fi -} - - -# Grab a numbered field from python prettytable output -# Fields are numbered starting with 1 -# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc. -# get_field field-number -function get_field() { - while read data; do - if [ "$1" -lt 0 ]; then - field="(\$(NF$1))" - else - field="\$$(($1 + 1))" - fi - echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}" - done -} - - -# Get the default value for HOST_IP -# get_default_host_ip fixed_range floating_range host_ip_iface host_ip -function get_default_host_ip() { - local fixed_range=$1 - local floating_range=$2 - local host_ip_iface=$3 - local host_ip=$4 - - # Find the interface used for the default route - host_ip_iface=${host_ip_iface:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)} - # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable - if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then - host_ip="" - host_ips=`LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/"); print parts[1]}'` - for IP in $host_ips; do - # Attempt to filter out IP addresses that are part of the fixed and - # floating range. Note that this method only works if the ``netaddr`` - # python library is installed. If it is not installed, an error - # will be printed and the first IP from the interface will be used. - # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct - # address. - if ! 
(address_in_net $IP $fixed_range || address_in_net $IP $floating_range); then - host_ip=$IP - break; - fi - done - fi - echo $host_ip -} - - -function _get_package_dir() { - local pkg_dir - if is_ubuntu; then - pkg_dir=$FILES/apts - elif is_fedora; then - pkg_dir=$FILES/rpms - elif is_suse; then - pkg_dir=$FILES/rpms-suse - else - exit_distro_not_supported "list of packages" - fi - echo "$pkg_dir" -} - - -# get_packages() collects a list of package names of any type from the -# prerequisite files in ``files/{apts|rpms}``. The list is intended -# to be passed to a package installer such as apt or yum. -# -# Only packages required for the services in 1st argument will be -# included. Two bits of metadata are recognized in the prerequisite files: -# -# - ``# NOPRIME`` defers installation to be performed later in `stack.sh` -# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection -# of the package to the distros listed. The distro names are case insensitive. -function get_packages() { - local services=$@ - local package_dir=$(_get_package_dir) - local file_to_parse - local service - - if [[ -z "$package_dir" ]]; then - echo "No package directory supplied" - return 1 - fi - if [[ -z "$DISTRO" ]]; then - GetDistro - fi - for service in ${services//,/ }; do - # Allow individual services to specify dependencies - if [[ -e ${package_dir}/${service} ]]; then - file_to_parse="${file_to_parse} $service" - fi - # NOTE(sdague) n-api needs glance for now because that's where - # glance client is - if [[ $service == n-api ]]; then - if [[ ! $file_to_parse =~ nova ]]; then - file_to_parse="${file_to_parse} nova" - fi - if [[ ! $file_to_parse =~ glance ]]; then - file_to_parse="${file_to_parse} glance" - fi - elif [[ $service == c-* ]]; then - if [[ ! $file_to_parse =~ cinder ]]; then - file_to_parse="${file_to_parse} cinder" - fi - elif [[ $service == ceilometer-* ]]; then - if [[ ! 
$file_to_parse =~ ceilometer ]]; then - file_to_parse="${file_to_parse} ceilometer" - fi - elif [[ $service == s-* ]]; then - if [[ ! $file_to_parse =~ swift ]]; then - file_to_parse="${file_to_parse} swift" - fi - elif [[ $service == n-* ]]; then - if [[ ! $file_to_parse =~ nova ]]; then - file_to_parse="${file_to_parse} nova" - fi - elif [[ $service == g-* ]]; then - if [[ ! $file_to_parse =~ glance ]]; then - file_to_parse="${file_to_parse} glance" - fi - elif [[ $service == key* ]]; then - if [[ ! $file_to_parse =~ keystone ]]; then - file_to_parse="${file_to_parse} keystone" - fi - elif [[ $service == q-* ]]; then - if [[ ! $file_to_parse =~ neutron ]]; then - file_to_parse="${file_to_parse} neutron" - fi - fi - done - - for file in ${file_to_parse}; do - local fname=${package_dir}/${file} - local OIFS line package distros distro - [[ -e $fname ]] || continue - - OIFS=$IFS - IFS=$'\n' - for line in $(<${fname}); do - if [[ $line =~ "NOPRIME" ]]; then - continue - fi - - # Assume we want this package - package=${line%#*} - inst_pkg=1 - - # Look for # dist:xxx in comment - if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then - # We are using BASH regexp matching feature. - package=${BASH_REMATCH[1]} - distros=${BASH_REMATCH[2]} - # In bash ${VAR,,} will lowecase VAR - # Look for a match in the distro list - if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then - # If no match then skip this package - inst_pkg=0 - fi - fi - - # Look for # testonly in comment - if [[ $line =~ (.*)#.*testonly.* ]]; then - package=${BASH_REMATCH[1]} - # Are we installing test packages? 
(test for the default value) - if [[ $INSTALL_TESTONLY_PACKAGES = "False" ]]; then - # If not installing test packages the skip this package - inst_pkg=0 - fi - fi - - if [[ $inst_pkg = 1 ]]; then - echo $package - fi - done - IFS=$OIFS - done -} - - -# Determine OS Vendor, Release and Update -# Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora -# Returns results in global variables: -# os_VENDOR - vendor name -# os_RELEASE - release -# os_UPDATE - update -# os_PACKAGE - package type -# os_CODENAME - vendor's codename for release -# GetOSVersion -GetOSVersion() { - # Figure out which vendor we are - if [[ -x "`which sw_vers 2>/dev/null`" ]]; then - # OS/X - os_VENDOR=`sw_vers -productName` - os_RELEASE=`sw_vers -productVersion` - os_UPDATE=${os_RELEASE##*.} - os_RELEASE=${os_RELEASE%.*} - os_PACKAGE="" - if [[ "$os_RELEASE" =~ "10.7" ]]; then - os_CODENAME="lion" - elif [[ "$os_RELEASE" =~ "10.6" ]]; then - os_CODENAME="snow leopard" - elif [[ "$os_RELEASE" =~ "10.5" ]]; then - os_CODENAME="leopard" - elif [[ "$os_RELEASE" =~ "10.4" ]]; then - os_CODENAME="tiger" - elif [[ "$os_RELEASE" =~ "10.3" ]]; then - os_CODENAME="panther" - else - os_CODENAME="" - fi - elif [[ -x $(which lsb_release 2>/dev/null) ]]; then - os_VENDOR=$(lsb_release -i -s) - os_RELEASE=$(lsb_release -r -s) - os_UPDATE="" - os_PACKAGE="rpm" - if [[ "Debian,Ubuntu,LinuxMint" =~ $os_VENDOR ]]; then - os_PACKAGE="deb" - elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then - lsb_release -d -s | grep -q openSUSE - if [[ $? 
-eq 0 ]]; then - os_VENDOR="openSUSE" - fi - elif [[ $os_VENDOR == "openSUSE project" ]]; then - os_VENDOR="openSUSE" - elif [[ $os_VENDOR =~ Red.*Hat ]]; then - os_VENDOR="Red Hat" - fi - os_CODENAME=$(lsb_release -c -s) - elif [[ -r /etc/redhat-release ]]; then - # Red Hat Enterprise Linux Server release 5.5 (Tikanga) - # Red Hat Enterprise Linux Server release 7.0 Beta (Maipo) - # CentOS release 5.5 (Final) - # CentOS Linux release 6.0 (Final) - # Fedora release 16 (Verne) - # XenServer release 6.2.0-70446c (xenenterprise) - os_CODENAME="" - for r in "Red Hat" CentOS Fedora XenServer; do - os_VENDOR=$r - if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then - ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release` - os_CODENAME=${ver#*|} - os_RELEASE=${ver%|*} - os_UPDATE=${os_RELEASE##*.} - os_RELEASE=${os_RELEASE%.*} - break - fi - os_VENDOR="" - done - os_PACKAGE="rpm" - elif [[ -r /etc/SuSE-release ]]; then - for r in openSUSE "SUSE Linux"; do - if [[ "$r" = "SUSE Linux" ]]; then - os_VENDOR="SUSE LINUX" - else - os_VENDOR=$r - fi - - if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then - os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'` - os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'` - os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'` - break - fi - os_VENDOR="" - done - os_PACKAGE="rpm" - # If lsb_release is not installed, we should be able to detect Debian OS - elif [[ -f /etc/debian_version ]] && [[ $(cat /proc/version) =~ "Debian" ]]; then - os_VENDOR="Debian" - os_PACKAGE="deb" - os_CODENAME=$(awk '/VERSION=/' /etc/os-release | sed 's/VERSION=//' | sed -r 's/\"|\(|\)//g' | awk '{print $2}') - os_RELEASE=$(awk '/VERSION_ID=/' /etc/os-release | sed 's/VERSION_ID=//' | sed 's/\"//g') - fi - export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME -} - - -# Translate the OS version values into common nomenclature -# Sets ``DISTRO`` from the ``os_*`` values -function 
GetDistro() { - GetOSVersion - if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then - # 'Everyone' refers to Ubuntu / Debian releases by the code name adjective - DISTRO=$os_CODENAME - elif [[ "$os_VENDOR" =~ (Fedora) ]]; then - # For Fedora, just use 'f' and the release - DISTRO="f$os_RELEASE" - elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then - DISTRO="opensuse-$os_RELEASE" - elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then - # For SLE, also use the service pack - if [[ -z "$os_UPDATE" ]]; then - DISTRO="sle${os_RELEASE}" - else - DISTRO="sle${os_RELEASE}sp${os_UPDATE}" - fi - elif [[ "$os_VENDOR" =~ (Red Hat) || "$os_VENDOR" =~ (CentOS) ]]; then - # Drop the . release as we assume it's compatible - DISTRO="rhel${os_RELEASE::1}" - elif [[ "$os_VENDOR" =~ (XenServer) ]]; then - DISTRO="xs$os_RELEASE" - else - # Catch-all for now is Vendor + Release + Update - DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE" - fi - export DISTRO -} - - -# Determine if current distribution is a Fedora-based distribution -# (Fedora, RHEL, CentOS, etc). -# is_fedora -function is_fedora { - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || [ "$os_VENDOR" = "CentOS" ] -} - - -# Determine if current distribution is a SUSE-based distribution -# (openSUSE, SLE). -# is_suse -function is_suse { - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - [ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ] -} - - -# Determine if current distribution is an Ubuntu-based distribution -# It will also detect non-Ubuntu but Debian-based distros -# is_ubuntu -function is_ubuntu { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - [ "$os_PACKAGE" = "deb" ] -} - - -# Exit after outputting a message about the distribution not being supported. 
-# exit_distro_not_supported [optional-string-telling-what-is-missing] -function exit_distro_not_supported { - if [[ -z "$DISTRO" ]]; then - GetDistro - fi - - if [ $# -gt 0 ]; then - die $LINENO "Support for $DISTRO is incomplete: no support for $@" - else - die $LINENO "Support for $DISTRO is incomplete." - fi -} - -# Utility function for checking machine architecture -# is_arch arch-type -function is_arch { - ARCH_TYPE=$1 - - [[ "$(uname -m)" == "$ARCH_TYPE" ]] -} - # Checks if installed Apache is <= given version # $1 = x.y.z (version string of Apache) function check_apache_version { @@ -570,488 +28,6 @@ function check_apache_version { expr "$version" '>=' $1 > /dev/null } -# git clone only if directory doesn't exist already. Since ``DEST`` might not -# be owned by the installation user, we create the directory and change the -# ownership to the proper user. -# Set global RECLONE=yes to simulate a clone when dest-dir exists -# Set global ERROR_ON_CLONE=True to abort execution with an error if the git repo -# does not exist (default is False, meaning the repo will be cloned). -# Uses global ``OFFLINE`` -# git_clone remote dest-dir branch -function git_clone { - GIT_REMOTE=$1 - GIT_DEST=$2 - GIT_REF=$3 - RECLONE=$(trueorfalse False $RECLONE) - - if [[ "$OFFLINE" = "True" ]]; then - echo "Running in offline mode, clones already exist" - # print out the results so we know what change was used in the logs - cd $GIT_DEST - git show --oneline | head -1 - return - fi - - if echo $GIT_REF | egrep -q "^refs"; then - # If our branch name is a gerrit style refs/changes/... - if [[ ! -d $GIT_DEST ]]; then - [[ "$ERROR_ON_CLONE" = "True" ]] && \ - die $LINENO "Cloning not allowed in this configuration" - git clone $GIT_REMOTE $GIT_DEST - fi - cd $GIT_DEST - git fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD - else - # do a full clone only if the directory doesn't exist - if [[ ! 
-d $GIT_DEST ]]; then - [[ "$ERROR_ON_CLONE" = "True" ]] && \ - die $LINENO "Cloning not allowed in this configuration" - git clone $GIT_REMOTE $GIT_DEST - cd $GIT_DEST - # This checkout syntax works for both branches and tags - git checkout $GIT_REF - elif [[ "$RECLONE" = "True" ]]; then - # if it does exist then simulate what clone does if asked to RECLONE - cd $GIT_DEST - # set the url to pull from and fetch - git remote set-url origin $GIT_REMOTE - git fetch origin - # remove the existing ignored files (like pyc) as they cause breakage - # (due to the py files having older timestamps than our pyc, so python - # thinks the pyc files are correct using them) - find $GIT_DEST -name '*.pyc' -delete - - # handle GIT_REF accordingly to type (tag, branch) - if [[ -n "`git show-ref refs/tags/$GIT_REF`" ]]; then - git_update_tag $GIT_REF - elif [[ -n "`git show-ref refs/heads/$GIT_REF`" ]]; then - git_update_branch $GIT_REF - elif [[ -n "`git show-ref refs/remotes/origin/$GIT_REF`" ]]; then - git_update_remote_branch $GIT_REF - else - die $LINENO "$GIT_REF is neither branch nor tag" - fi - - fi - fi - - # print out the results so we know what change was used in the logs - cd $GIT_DEST - git show --oneline | head -1 -} - - -# git update using reference as a branch. -# git_update_branch ref -function git_update_branch() { - - GIT_BRANCH=$1 - - git checkout -f origin/$GIT_BRANCH - # a local branch might not exist - git branch -D $GIT_BRANCH || true - git checkout -b $GIT_BRANCH -} - - -# git update using reference as a branch. -# git_update_remote_branch ref -function git_update_remote_branch() { - - GIT_BRANCH=$1 - - git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH -} - - -# git update using reference as a tag. 
Be careful editing source at that repo -# as working copy will be in a detached mode -# git_update_tag ref -function git_update_tag() { - - GIT_TAG=$1 - - git tag -d $GIT_TAG - # fetching given tag only - git fetch origin tag $GIT_TAG - git checkout -f $GIT_TAG -} - - -# Comment an option in an INI file -# inicomment config-file section option -function inicomment() { - local file=$1 - local section=$2 - local option=$3 - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" -} - - -# Uncomment an option in an INI file -# iniuncomment config-file section option -function iniuncomment() { - local file=$1 - local section=$2 - local option=$3 - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file" -} - - -# Get an option from an INI file -# iniget config-file section option -function iniget() { - local file=$1 - local section=$2 - local option=$3 - local line - line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") - echo ${line#*=} -} - - -# Determinate is the given option present in the INI file -# ini_has_option config-file section option -function ini_has_option() { - local file=$1 - local section=$2 - local option=$3 - local line - line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") - [ -n "$line" ] -} - - -# Set an option in an INI file -# iniset config-file section option value -function iniset() { - local file=$1 - local section=$2 - local option=$3 - local value=$4 - - [[ -z $section || -z $option ]] && return - - if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then - # Add section at the end - echo -e "\n[$section]" >>"$file" - fi - if ! 
ini_has_option "$file" "$section" "$option"; then - # Add it - sed -i -e "/^\[$section\]/ a\\ -$option = $value -" "$file" - else - local sep=$(echo -ne "\x01") - # Replace it - sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" - fi -} - - -# Get a multiple line option from an INI file -# iniget_multiline config-file section option -function iniget_multiline() { - local file=$1 - local section=$2 - local option=$3 - local values - values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file") - echo ${values} -} - - -# Set a multiple line option in an INI file -# iniset_multiline config-file section option value1 value2 valu3 ... -function iniset_multiline() { - local file=$1 - local section=$2 - local option=$3 - shift 3 - local values - for v in $@; do - # The later sed command inserts each new value in the line next to - # the section identifier, which causes the values to be inserted in - # the reverse order. Do a reverse here to keep the original order. - values="$v ${values}" - done - if ! grep -q "^\[$section\]" "$file"; then - # Add section at the end - echo -e "\n[$section]" >>"$file" - else - # Remove old values - sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file" - fi - # Add new ones - for v in $values; do - sed -i -e "/^\[$section\]/ a\\ -$option = $v -" "$file" - done -} - - -# Append a new option in an ini file without replacing the old value -# iniadd config-file section option value1 value2 value3 ... -function iniadd() { - local file=$1 - local section=$2 - local option=$3 - shift 3 - local values="$(iniget_multiline $file $section $option) $@" - iniset_multiline $file $section $option $values -} - -# Find out if a process exists by partial name. -# is_running name -function is_running() { - local name=$1 - ps auxw | grep -v grep | grep ${name} > /dev/null - RC=$? 
- # some times I really hate bash reverse binary logic - return $RC -} - - -# is_service_enabled() checks if the service(s) specified as arguments are -# enabled by the user in ``ENABLED_SERVICES``. -# -# Multiple services specified as arguments are ``OR``'ed together; the test -# is a short-circuit boolean, i.e it returns on the first match. -# -# There are special cases for some 'catch-all' services:: -# **nova** returns true if any service enabled start with **n-** -# **cinder** returns true if any service enabled start with **c-** -# **ceilometer** returns true if any service enabled start with **ceilometer** -# **glance** returns true if any service enabled start with **g-** -# **neutron** returns true if any service enabled start with **q-** -# **swift** returns true if any service enabled start with **s-** -# **trove** returns true if any service enabled start with **tr-** -# For backward compatibility if we have **swift** in ENABLED_SERVICES all the -# **s-** services will be enabled. This will be deprecated in the future. -# -# Cells within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``. -# We also need to make sure to treat **n-cell-region** and **n-cell-child** -# as enabled in this case. -# -# Uses global ``ENABLED_SERVICES`` -# is_service_enabled service [service ...] -function is_service_enabled() { - services=$@ - for service in ${services}; do - [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 - - # Look for top-level 'enabled' function for this service - if type is_${service}_enabled >/dev/null 2>&1; then - # A function exists for this service, use it - is_${service}_enabled - return $? 
- fi - - # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled() - # are implemented - [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0 - [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0 - [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0 - [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0 - [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 - [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && return 0 - [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 - [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0 - [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0 - [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0 - done - return 1 -} - - -# remove extra commas from the input string (i.e. ``ENABLED_SERVICES``) -# _cleanup_service_list service-list -function _cleanup_service_list () { - echo "$1" | sed -e ' - s/,,/,/g; - s/^,//; - s/,$// - ' -} - - -# enable_service() adds the services passed as argument to the -# ``ENABLED_SERVICES`` list, if they are not already present. -# -# For example: -# enable_service qpid -# -# This function does not know about the special cases -# for nova, glance, and neutron built into is_service_enabled(). -# Uses global ``ENABLED_SERVICES`` -# enable_service service [service ...] -function enable_service() { - local tmpsvcs="${ENABLED_SERVICES}" - for service in $@; do - if ! is_service_enabled $service; then - tmpsvcs+=",$service" - fi - done - ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") - disable_negated_services -} - - -# disable_service() removes the services passed as argument to the -# ``ENABLED_SERVICES`` list, if they are present. 
-# -# For example: -# disable_service rabbit -# -# This function does not know about the special cases -# for nova, glance, and neutron built into is_service_enabled(). -# Uses global ``ENABLED_SERVICES`` -# disable_service service [service ...] -function disable_service() { - local tmpsvcs=",${ENABLED_SERVICES}," - local service - for service in $@; do - if is_service_enabled $service; then - tmpsvcs=${tmpsvcs//,$service,/,} - fi - done - ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") -} - - -# disable_all_services() removes all current services -# from ``ENABLED_SERVICES`` to reset the configuration -# before a minimal installation -# Uses global ``ENABLED_SERVICES`` -# disable_all_services -function disable_all_services() { - ENABLED_SERVICES="" -} - - -# Remove all services starting with '-'. For example, to install all default -# services except rabbit (rabbit) set in ``localrc``: -# ENABLED_SERVICES+=",-rabbit" -# Uses global ``ENABLED_SERVICES`` -# disable_negated_services -function disable_negated_services() { - local tmpsvcs="${ENABLED_SERVICES}" - local service - for service in ${tmpsvcs//,/ }; do - if [[ ${service} == -* ]]; then - tmpsvcs=$(echo ${tmpsvcs}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g") - fi - done - ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") -} - - -# Distro-agnostic package installer -# install_package package [package ...] -function install_package() { - if is_ubuntu; then - [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update - NO_UPDATE_REPOS=True - - apt_get install "$@" - elif is_fedora; then - yum_install "$@" - elif is_suse; then - zypper_install "$@" - else - exit_distro_not_supported "installing packages" - fi -} - - -# Distro-agnostic package uninstaller -# uninstall_package package [package ...] 
-function uninstall_package() { - if is_ubuntu; then - apt_get purge "$@" - elif is_fedora; then - sudo yum remove -y "$@" - elif is_suse; then - sudo zypper rm "$@" - else - exit_distro_not_supported "uninstalling packages" - fi -} - - -# Distro-agnostic function to tell if a package is installed -# is_package_installed package [package ...] -function is_package_installed() { - if [[ -z "$@" ]]; then - return 1 - fi - - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - - if [[ "$os_PACKAGE" = "deb" ]]; then - dpkg -s "$@" > /dev/null 2> /dev/null - elif [[ "$os_PACKAGE" = "rpm" ]]; then - rpm --quiet -q "$@" - else - exit_distro_not_supported "finding if a package is installed" - fi -} - - -# Test if the named environment variable is set and not zero length -# is_set env-var -function is_set() { - local var=\$"$1" - eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depends on this -} - - -# Wrapper for ``pip install`` to set cache and proxy environment variables -# Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``PIP_USE_MIRRORS``, -# ``TRACK_DEPENDS``, ``*_proxy`` -# pip_install package [package ...] -function pip_install { - [[ "$OFFLINE" = "True" || -z "$@" ]] && return - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - if [[ $TRACK_DEPENDS = True ]]; then - source $DEST/.venv/bin/activate - CMD_PIP=$DEST/.venv/bin/pip - SUDO_PIP="env" - else - SUDO_PIP="sudo" - CMD_PIP=$(get_pip_command) - fi - - # Mirror option not needed anymore because pypi has CDN available, - # but it's useful in certain circumstances - PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False} - if [[ "$PIP_USE_MIRRORS" != "False" ]]; then - PIP_MIRROR_OPT="--use-mirrors" - fi - - # pip < 1.4 has a bug where it will use an already existing build - # directory unconditionally. Say an earlier component installs - # foo v1.1; pip will have built foo's source in - # /tmp/$USER-pip-build. 
Even if a later component specifies foo < - # 1.1, the existing extracted build will be used and cause - # confusing errors. By creating unique build directories we avoid - # this problem. See https://github.com/pypa/pip/issues/709 - local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX) - - $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \ - HTTP_PROXY=$http_proxy \ - HTTPS_PROXY=$https_proxy \ - NO_PROXY=$no_proxy \ - $CMD_PIP install --build=${pip_build_tmp} \ - $PIP_MIRROR_OPT $@ \ - && $SUDO_PIP rm -rf ${pip_build_tmp} -} - # Cleanup anything from /tmp on unstack # clean_tmp @@ -1062,243 +38,6 @@ function cleanup_tmp { sudo rm -rf ${tmp_dir}/pip-build.* } -# Service wrapper to restart services -# restart_service service-name -function restart_service() { - if is_ubuntu; then - sudo /usr/sbin/service $1 restart - else - sudo /sbin/service $1 restart - fi -} - - -# _run_process() is designed to be backgrounded by run_process() to simulate a -# fork. It includes the dirty work of closing extra filehandles and preparing log -# files to produce the same logs as screen_it(). The log filename is derived -# from the service name and global-and-now-misnamed SCREEN_LOGDIR -# _run_process service "command-line" -function _run_process() { - local service=$1 - local command="$2" - - # Undo logging redirections and close the extra descriptors - exec 1>&3 - exec 2>&3 - exec 3>&- - exec 6>&- - - if [[ -n ${SCREEN_LOGDIR} ]]; then - exec 1>&${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log 2>&1 - ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log - - # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs. - export PYTHONUNBUFFERED=1 - fi - - exec /bin/bash -c "$command" - die "$service exec failure: $command" -} - - -# run_process() launches a child process that closes all file descriptors and -# then exec's the passed in command. 
This is meant to duplicate the semantics -# of screen_it() without screen. PIDs are written to -# $SERVICE_DIR/$SCREEN_NAME/$service.pid -# run_process service "command-line" -function run_process() { - local service=$1 - local command="$2" - - # Spawn the child process - _run_process "$service" "$command" & - echo $! -} - - -# Helper to launch a service in a named screen -# screen_it service "command-line" -function screen_it { - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - USE_SCREEN=$(trueorfalse True $USE_SCREEN) - - if is_service_enabled $1; then - # Append the service to the screen rc file - screen_rc "$1" "$2" - - if [[ "$USE_SCREEN" = "True" ]]; then - screen -S $SCREEN_NAME -X screen -t $1 - - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log - screen -S $SCREEN_NAME -p $1 -X log on - ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log - fi - - # sleep to allow bash to be ready to be send the command - we are - # creating a new window in screen and then sends characters, so if - # bash isn't running by the time we send the command, nothing happens - sleep 1.5 - - NL=`echo -ne '\015'` - # This fun command does the following: - # - the passed server command is backgrounded - # - the pid of the background process is saved in the usual place - # - the server process is brought back to the foreground - # - if the server process exits prematurely the fg command errors - # and a message is written to stdout and the service failure file - # The pid saved can be used in screen_stop() as a process group - # id to kill off all child processes - screen -S $SCREEN_NAME -p $1 -X stuff "$2 & echo \$! 
>$SERVICE_DIR/$SCREEN_NAME/$1.pid; fg || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" - else - # Spawn directly without screen - run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid - fi - fi -} - - -# Stop a service in screen -# If a PID is available use it, kill the whole process group via TERM -# If screen is being used kill the screen window; this will catch processes -# that did not leave a PID behind -# screen_stop service -function screen_stop() { - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - USE_SCREEN=$(trueorfalse True $USE_SCREEN) - - if is_service_enabled $1; then - # Kill via pid if we have one available - if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then - pkill -TERM -P -$(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid) - rm $SERVICE_DIR/$SCREEN_NAME/$1.pid - fi - if [[ "$USE_SCREEN" = "True" ]]; then - # Clean up the screen window - screen -S $SCREEN_NAME -p $1 -X kill - fi - fi -} - - -# Screen rc file builder -# screen_rc service "command-line" -function screen_rc { - SCREEN_NAME=${SCREEN_NAME:-stack} - SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc - if [[ ! -e $SCREENRC ]]; then - # Name the screen session - echo "sessionname $SCREEN_NAME" > $SCREENRC - # Set a reasonable statusbar - echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC - # Some distributions override PROMPT_COMMAND for the screen terminal type - turn that off - echo "setenv PROMPT_COMMAND /bin/true" >> $SCREENRC - echo "screen -t shell bash" >> $SCREENRC - fi - # If this service doesn't already exist in the screenrc file - if ! 
grep $1 $SCREENRC 2>&1 > /dev/null; then - NL=`echo -ne '\015'` - echo "screen -t $1 bash" >> $SCREENRC - echo "stuff \"$2$NL\"" >> $SCREENRC - - if [[ -n ${SCREEN_LOGDIR} ]]; then - echo "logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log" >>$SCREENRC - echo "log on" >>$SCREENRC - fi - fi -} - - -# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``. -# This is used for ``service_check`` when all the ``screen_it`` are called finished -# init_service_check -function init_service_check() { - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - - if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then - mkdir -p "$SERVICE_DIR/$SCREEN_NAME" - fi - - rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure -} - - -# Helper to get the status of each running service -# service_check -function service_check() { - local service - local failures - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - - - if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then - echo "No service status directory found" - return - fi - - # Check if there is any falure flag file under $SERVICE_DIR/$SCREEN_NAME - failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null` - - for service in $failures; do - service=`basename $service` - service=${service%.failure} - echo "Error: Service $service is not running" - done - - if [ -n "$failures" ]; then - echo "More details about the above errors can be found with screen, with ./rejoin-stack.sh" - fi -} - -# Returns true if the directory is on a filesystem mounted via NFS. -function is_nfs_directory() { - local mount_type=`stat -f -L -c %T $1` - test "$mount_type" == "nfs" -} - -# Only run the command if the target file (the last arg) is not on an -# NFS filesystem. -function _safe_permission_operation() { - local args=( $@ ) - local last - local sudo_cmd - local dir_to_check - - let last="${#args[*]} - 1" - - dir_to_check=${args[$last]} - if [ ! 
-d "$dir_to_check" ]; then - dir_to_check=`dirname "$dir_to_check"` - fi - - if is_nfs_directory "$dir_to_check" ; then - return 0 - fi - - if [[ $TRACK_DEPENDS = True ]]; then - sudo_cmd="env" - else - sudo_cmd="sudo" - fi - - $sudo_cmd $@ -} - -# Only change ownership of a file or directory if it is not on an NFS -# filesystem. -function safe_chown() { - _safe_permission_operation chown $@ -} - -# Only change permissions of a file or directory if it is not on an -# NFS filesystem. -function safe_chmod() { - _safe_permission_operation chmod $@ -} # ``pip install -e`` the package, which processes the dependencies # using pip before running `setup.py develop` @@ -1340,6 +79,7 @@ function setup_develop() { fi } + # ``pip install -e`` the package, which processes the dependencies # using pip before running `setup.py develop` # Uses globals ``STACK_USER`` @@ -1353,43 +93,6 @@ function setup_develop_no_requirements_update() { } -# Service wrapper to start services -# start_service service-name -function start_service() { - if is_ubuntu; then - sudo /usr/sbin/service $1 start - else - sudo /sbin/service $1 start - fi -} - - -# Service wrapper to stop services -# stop_service service-name -function stop_service() { - if is_ubuntu; then - sudo /usr/sbin/service $1 stop - else - sudo /sbin/service $1 stop - fi -} - - -# Normalize config values to True or False -# Accepts as False: 0 no No NO false False FALSE -# Accepts as True: 1 yes Yes YES true True TRUE -# VAR=$(trueorfalse default-value test-value) -function trueorfalse() { - local default=$1 - local testval=$2 - - [[ -z "$testval" ]] && { echo "$default"; return; } - [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; } - [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; } - echo "$default" -} - - # Retrieve an image from a URL and upload into Glance. 
# Uses the following variables: # @@ -1685,23 +388,6 @@ function use_database { } -# Toggle enable/disable_service for services that must run exclusive of each other -# $1 The name of a variable containing a space-separated list of services -# $2 The name of a variable in which to store the enabled service's name -# $3 The name of the service to enable -function use_exclusive_service { - local options=${!1} - local selection=$3 - out=$2 - [ -z $selection ] || [[ ! "$options" =~ "$selection" ]] && return 1 - for opt in $options;do - [[ "$opt" = "$selection" ]] && enable_service $opt || disable_service $opt - done - eval "$out=$selection" - return 0 -} - - # Wait for an HTTP server to start answering requests # wait_for_service timeout url function wait_for_service() { @@ -1711,30 +397,6 @@ function wait_for_service() { } -# Wrapper for ``yum`` to set proxy environment variables -# Uses globals ``OFFLINE``, ``*_proxy`` -# yum_install package [package ...] -function yum_install() { - [[ "$OFFLINE" = "True" ]] && return - local sudo="sudo" - [[ "$(id -u)" = "0" ]] && sudo="env" - $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ - no_proxy=$no_proxy \ - yum install -y "$@" -} - - -# zypper wrapper to set arguments correctly -# zypper_install package [package ...] -function zypper_install() { - [[ "$OFFLINE" = "True" ]] && return - local sudo="sudo" - [[ "$(id -u)" = "0" ]] && sudo="env" - $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ - zypper --non-interactive install --auto-agree-with-licenses "$@" -} - - # ping check # Uses globals ``ENABLED_SERVICES`` # ping_check from-net ip boot-timeout expected @@ -1809,36 +471,6 @@ function _ssh_check_novanet() { } -# Add a user to a group. -# add_user_to_group user group -function add_user_to_group() { - local user=$1 - local group=$2 - - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - # SLE11 and openSUSE 12.2 don't have the usual usermod - if ! 
is_suse || [[ "$os_VENDOR" = "openSUSE" && "$os_RELEASE" != "12.2" ]]; then - sudo usermod -a -G "$group" "$user" - else - sudo usermod -A "$group" "$user" - fi -} - - -# Get the path to the direcotry where python executables are installed. -# get_python_exec_prefix -function get_python_exec_prefix() { - if is_fedora || is_suse; then - echo "/usr/bin" - else - echo "/usr/local/bin" - fi -} - - # Get the location of the $module-rootwrap executables, where module is cinder # or nova. # get_rootwrap_location module @@ -1849,17 +481,6 @@ function get_rootwrap_location() { } -# Get the path to the pip command. -# get_pip_command -function get_pip_command() { - which pip || which pip-python - - if [ $? -ne 0 ]; then - die $LINENO "Unable to find pip; cannot continue" - fi -} - - # Path permissions sanity check # check_path_perm_sanity path function check_path_perm_sanity() { @@ -1944,37 +565,6 @@ vercmp_numbers() { } -# ``policy_add policy_file policy_name policy_permissions`` -# -# Add a policy to a policy.json file -# Do nothing if the policy already exists - -function policy_add() { - local policy_file=$1 - local policy_name=$2 - local policy_perm=$3 - - if grep -q ${policy_name} ${policy_file}; then - echo "Policy ${policy_name} already exists in ${policy_file}" - return - fi - - # Add a terminating comma to policy lines without one - # Remove the closing '}' and all lines following to the end-of-file - local tmpfile=$(mktemp) - uniq ${policy_file} | sed -e ' - s/]$/],/ - /^[}]/,$d - ' > ${tmpfile} - - # Append policy and closing brace - echo " \"${policy_name}\": ${policy_perm}" >>${tmpfile} - echo "}" >>${tmpfile} - - mv ${tmpfile} ${policy_file} -} - - # This function sets log formatting options for colorizing log # output to stdout. It is meant to be called by lib modules. 
# The last two parameters are optional and can be used to specify @@ -1994,10 +584,10 @@ function setup_colorized_logging() { iniset $conf_file $conf_section logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" } + # Restore xtrace $XTRACE - # Local variables: # mode: shell-script # End: diff --git a/functions-common b/functions-common new file mode 100644 index 0000000000..0cecb0b9fb --- /dev/null +++ b/functions-common @@ -0,0 +1,1433 @@ +# functions-common - Common functions used by DevStack components +# +# The canonical copy of this file is maintained in the DevStack repo. +# All modifications should be made there and then sync'ed to other repos +# as required. +# +# This file is sorted alphabetically within the function groups. +# +# - Config Functions +# - Control Functions +# - Distro Functions +# - Git Functions +# - OpenStack Functions +# - Package Functions +# - Process Functions +# - Python Functions +# - Service Functions +# +# The following variables are assumed to be defined by certain functions: +# +# - ``ENABLED_SERVICES`` +# - ``ERROR_ON_CLONE`` +# - ``FILES`` +# - ``OFFLINE`` +# - ``PIP_DOWNLOAD_CACHE`` +# - ``PIP_USE_MIRRORS`` +# - ``RECLONE`` +# - ``TRACK_DEPENDS`` +# - ``http_proxy``, ``https_proxy``, ``no_proxy`` + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Config Functions +# ================ + +# Append a new option in an ini file without replacing the old value +# iniadd config-file section option value1 value2 value3 ... 
+function iniadd() { + local file=$1 + local section=$2 + local option=$3 + shift 3 + local values="$(iniget_multiline $file $section $option) $@" + iniset_multiline $file $section $option $values +} + +# Comment an option in an INI file +# inicomment config-file section option +function inicomment() { + local file=$1 + local section=$2 + local option=$3 + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" +} + +# Get an option from an INI file +# iniget config-file section option +function iniget() { + local file=$1 + local section=$2 + local option=$3 + local line + line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + echo ${line#*=} +} + +# Get a multiple line option from an INI file +# iniget_multiline config-file section option +function iniget_multiline() { + local file=$1 + local section=$2 + local option=$3 + local values + values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file") + echo ${values} +} + +# Determinate is the given option present in the INI file +# ini_has_option config-file section option +function ini_has_option() { + local file=$1 + local section=$2 + local option=$3 + local line + line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + [ -n "$line" ] +} + +# Set an option in an INI file +# iniset config-file section option value +function iniset() { + local file=$1 + local section=$2 + local option=$3 + local value=$4 + + [[ -z $section || -z $option ]] && return + + if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then + # Add section at the end + echo -e "\n[$section]" >>"$file" + fi + if ! 
ini_has_option "$file" "$section" "$option"; then + # Add it + sed -i -e "/^\[$section\]/ a\\ +$option = $value +" "$file" + else + local sep=$(echo -ne "\x01") + # Replace it + sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" + fi +} + +# Set a multiple line option in an INI file +# iniset_multiline config-file section option value1 value2 valu3 ... +function iniset_multiline() { + local file=$1 + local section=$2 + local option=$3 + shift 3 + local values + for v in $@; do + # The later sed command inserts each new value in the line next to + # the section identifier, which causes the values to be inserted in + # the reverse order. Do a reverse here to keep the original order. + values="$v ${values}" + done + if ! grep -q "^\[$section\]" "$file"; then + # Add section at the end + echo -e "\n[$section]" >>"$file" + else + # Remove old values + sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file" + fi + # Add new ones + for v in $values; do + sed -i -e "/^\[$section\]/ a\\ +$option = $v +" "$file" + done +} + +# Uncomment an option in an INI file +# iniuncomment config-file section option +function iniuncomment() { + local file=$1 + local section=$2 + local option=$3 + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file" +} + +# Normalize config values to True or False +# Accepts as False: 0 no No NO false False FALSE +# Accepts as True: 1 yes Yes YES true True TRUE +# VAR=$(trueorfalse default-value test-value) +function trueorfalse() { + local default=$1 + local testval=$2 + + [[ -z "$testval" ]] && { echo "$default"; return; } + [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; } + [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; } + echo "$default" +} + + +# Control Functions +# ================= + +# Prints backtrace info +# filename:lineno:function +# backtrace level +function 
backtrace { + local level=$1 + local deep=$((${#BASH_SOURCE[@]} - 1)) + echo "[Call Trace]" + while [ $level -le $deep ]; do + echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}" + deep=$((deep - 1)) + done +} + +# Prints line number and "message" then exits +# die $LINENO "message" +function die() { + local exitcode=$? + set +o xtrace + local line=$1; shift + if [ $exitcode == 0 ]; then + exitcode=1 + fi + backtrace 2 + err $line "$*" + exit $exitcode +} + +# Checks an environment variable is not set or has length 0 OR if the +# exit code is non-zero and prints "message" and exits +# NOTE: env-var is the variable name without a '$' +# die_if_not_set $LINENO env-var "message" +function die_if_not_set() { + local exitcode=$? + FXTRACE=$(set +o | grep xtrace) + set +o xtrace + local line=$1; shift + local evar=$1; shift + if ! is_set $evar || [ $exitcode != 0 ]; then + die $line "$*" + fi + $FXTRACE +} + +# Prints line number and "message" in error format +# err $LINENO "message" +function err() { + local exitcode=$? + errXTRACE=$(set +o | grep xtrace) + set +o xtrace + local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2" + echo $msg 1>&2; + if [[ -n ${SCREEN_LOGDIR} ]]; then + echo $msg >> "${SCREEN_LOGDIR}/error.log" + fi + $errXTRACE + return $exitcode +} + +# Checks an environment variable is not set or has length 0 OR if the +# exit code is non-zero and prints "message" +# NOTE: env-var is the variable name without a '$' +# err_if_not_set $LINENO env-var "message" +function err_if_not_set() { + local exitcode=$? + errinsXTRACE=$(set +o | grep xtrace) + set +o xtrace + local line=$1; shift + local evar=$1; shift + if ! is_set $evar || [ $exitcode != 0 ]; then + err $line "$*" + fi + $errinsXTRACE + return $exitcode +} + +# Exit after outputting a message about the distribution not being supported. 
+# exit_distro_not_supported [optional-string-telling-what-is-missing] +function exit_distro_not_supported { + if [[ -z "$DISTRO" ]]; then + GetDistro + fi + + if [ $# -gt 0 ]; then + die $LINENO "Support for $DISTRO is incomplete: no support for $@" + else + die $LINENO "Support for $DISTRO is incomplete." + fi +} + +# Test if the named environment variable is set and not zero length +# is_set env-var +function is_set() { + local var=\$"$1" + eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depends on this +} + +# Prints line number and "message" in warning format +# warn $LINENO "message" +function warn() { + local exitcode=$? + errXTRACE=$(set +o | grep xtrace) + set +o xtrace + local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2" + echo $msg 1>&2; + if [[ -n ${SCREEN_LOGDIR} ]]; then + echo $msg >> "${SCREEN_LOGDIR}/error.log" + fi + $errXTRACE + return $exitcode +} + + +# Distro Functions +# ================ + +# Determine OS Vendor, Release and Update +# Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora +# Returns results in global variables: +# os_VENDOR - vendor name +# os_RELEASE - release +# os_UPDATE - update +# os_PACKAGE - package type +# os_CODENAME - vendor's codename for release +# GetOSVersion +GetOSVersion() { + # Figure out which vendor we are + if [[ -x "`which sw_vers 2>/dev/null`" ]]; then + # OS/X + os_VENDOR=`sw_vers -productName` + os_RELEASE=`sw_vers -productVersion` + os_UPDATE=${os_RELEASE##*.} + os_RELEASE=${os_RELEASE%.*} + os_PACKAGE="" + if [[ "$os_RELEASE" =~ "10.7" ]]; then + os_CODENAME="lion" + elif [[ "$os_RELEASE" =~ "10.6" ]]; then + os_CODENAME="snow leopard" + elif [[ "$os_RELEASE" =~ "10.5" ]]; then + os_CODENAME="leopard" + elif [[ "$os_RELEASE" =~ "10.4" ]]; then + os_CODENAME="tiger" + elif [[ "$os_RELEASE" =~ "10.3" ]]; then + os_CODENAME="panther" + else + os_CODENAME="" + fi + elif [[ -x $(which lsb_release 2>/dev/null) ]]; then + os_VENDOR=$(lsb_release -i -s) + 
os_RELEASE=$(lsb_release -r -s) + os_UPDATE="" + os_PACKAGE="rpm" + if [[ "Debian,Ubuntu,LinuxMint" =~ $os_VENDOR ]]; then + os_PACKAGE="deb" + elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then + lsb_release -d -s | grep -q openSUSE + if [[ $? -eq 0 ]]; then + os_VENDOR="openSUSE" + fi + elif [[ $os_VENDOR == "openSUSE project" ]]; then + os_VENDOR="openSUSE" + elif [[ $os_VENDOR =~ Red.*Hat ]]; then + os_VENDOR="Red Hat" + fi + os_CODENAME=$(lsb_release -c -s) + elif [[ -r /etc/redhat-release ]]; then + # Red Hat Enterprise Linux Server release 5.5 (Tikanga) + # Red Hat Enterprise Linux Server release 7.0 Beta (Maipo) + # CentOS release 5.5 (Final) + # CentOS Linux release 6.0 (Final) + # Fedora release 16 (Verne) + # XenServer release 6.2.0-70446c (xenenterprise) + os_CODENAME="" + for r in "Red Hat" CentOS Fedora XenServer; do + os_VENDOR=$r + if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then + ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release` + os_CODENAME=${ver#*|} + os_RELEASE=${ver%|*} + os_UPDATE=${os_RELEASE##*.} + os_RELEASE=${os_RELEASE%.*} + break + fi + os_VENDOR="" + done + os_PACKAGE="rpm" + elif [[ -r /etc/SuSE-release ]]; then + for r in openSUSE "SUSE Linux"; do + if [[ "$r" = "SUSE Linux" ]]; then + os_VENDOR="SUSE LINUX" + else + os_VENDOR=$r + fi + + if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then + os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'` + os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'` + os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'` + break + fi + os_VENDOR="" + done + os_PACKAGE="rpm" + # If lsb_release is not installed, we should be able to detect Debian OS + elif [[ -f /etc/debian_version ]] && [[ $(cat /proc/version) =~ "Debian" ]]; then + os_VENDOR="Debian" + os_PACKAGE="deb" + os_CODENAME=$(awk '/VERSION=/' /etc/os-release | sed 's/VERSION=//' | sed -r 's/\"|\(|\)//g' | awk '{print $2}') + os_RELEASE=$(awk '/VERSION_ID=/' /etc/os-release | 
sed 's/VERSION_ID=//' | sed 's/\"//g') + fi + export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME +} + +# Translate the OS version values into common nomenclature +# Sets global ``DISTRO`` from the ``os_*`` values +function GetDistro() { + GetOSVersion + if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then + # 'Everyone' refers to Ubuntu / Debian releases by the code name adjective + DISTRO=$os_CODENAME + elif [[ "$os_VENDOR" =~ (Fedora) ]]; then + # For Fedora, just use 'f' and the release + DISTRO="f$os_RELEASE" + elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then + DISTRO="opensuse-$os_RELEASE" + elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then + # For SLE, also use the service pack + if [[ -z "$os_UPDATE" ]]; then + DISTRO="sle${os_RELEASE}" + else + DISTRO="sle${os_RELEASE}sp${os_UPDATE}" + fi + elif [[ "$os_VENDOR" =~ (Red Hat) || "$os_VENDOR" =~ (CentOS) ]]; then + # Drop the . release as we assume it's compatible + DISTRO="rhel${os_RELEASE::1}" + elif [[ "$os_VENDOR" =~ (XenServer) ]]; then + DISTRO="xs$os_RELEASE" + else + # Catch-all for now is Vendor + Release + Update + DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE" + fi + export DISTRO +} + +# Utility function for checking machine architecture +# is_arch arch-type +function is_arch { + ARCH_TYPE=$1 + + [[ "$(uname -m)" == "$ARCH_TYPE" ]] +} + +# Determine if current distribution is a Fedora-based distribution +# (Fedora, RHEL, CentOS, etc). +# is_fedora +function is_fedora { + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || [ "$os_VENDOR" = "CentOS" ] +} + + +# Determine if current distribution is a SUSE-based distribution +# (openSUSE, SLE). 
+# is_suse +function is_suse { + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + [ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ] +} + + +# Determine if current distribution is an Ubuntu-based distribution +# It will also detect non-Ubuntu but Debian-based distros +# is_ubuntu +function is_ubuntu { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + [ "$os_PACKAGE" = "deb" ] +} + + +# Git Functions +# ============= + +# git clone only if directory doesn't exist already. Since ``DEST`` might not +# be owned by the installation user, we create the directory and change the +# ownership to the proper user. +# Set global RECLONE=yes to simulate a clone when dest-dir exists +# Set global ERROR_ON_CLONE=True to abort execution with an error if the git repo +# does not exist (default is False, meaning the repo will be cloned). +# Uses global ``OFFLINE`` +# git_clone remote dest-dir branch +function git_clone { + GIT_REMOTE=$1 + GIT_DEST=$2 + GIT_REF=$3 + RECLONE=$(trueorfalse False $RECLONE) + + if [[ "$OFFLINE" = "True" ]]; then + echo "Running in offline mode, clones already exist" + # print out the results so we know what change was used in the logs + cd $GIT_DEST + git show --oneline | head -1 + return + fi + + if echo $GIT_REF | egrep -q "^refs"; then + # If our branch name is a gerrit style refs/changes/... + if [[ ! -d $GIT_DEST ]]; then + [[ "$ERROR_ON_CLONE" = "True" ]] && \ + die $LINENO "Cloning not allowed in this configuration" + git clone $GIT_REMOTE $GIT_DEST + fi + cd $GIT_DEST + git fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD + else + # do a full clone only if the directory doesn't exist + if [[ ! 
-d $GIT_DEST ]]; then + [[ "$ERROR_ON_CLONE" = "True" ]] && \ + die $LINENO "Cloning not allowed in this configuration" + git clone $GIT_REMOTE $GIT_DEST + cd $GIT_DEST + # This checkout syntax works for both branches and tags + git checkout $GIT_REF + elif [[ "$RECLONE" = "True" ]]; then + # if it does exist then simulate what clone does if asked to RECLONE + cd $GIT_DEST + # set the url to pull from and fetch + git remote set-url origin $GIT_REMOTE + git fetch origin + # remove the existing ignored files (like pyc) as they cause breakage + # (due to the py files having older timestamps than our pyc, so python + # thinks the pyc files are correct using them) + find $GIT_DEST -name '*.pyc' -delete + + # handle GIT_REF accordingly to type (tag, branch) + if [[ -n "`git show-ref refs/tags/$GIT_REF`" ]]; then + git_update_tag $GIT_REF + elif [[ -n "`git show-ref refs/heads/$GIT_REF`" ]]; then + git_update_branch $GIT_REF + elif [[ -n "`git show-ref refs/remotes/origin/$GIT_REF`" ]]; then + git_update_remote_branch $GIT_REF + else + die $LINENO "$GIT_REF is neither branch nor tag" + fi + + fi + fi + + # print out the results so we know what change was used in the logs + cd $GIT_DEST + git show --oneline | head -1 +} + +# git update using reference as a branch. +# git_update_branch ref +function git_update_branch() { + + GIT_BRANCH=$1 + + git checkout -f origin/$GIT_BRANCH + # a local branch might not exist + git branch -D $GIT_BRANCH || true + git checkout -b $GIT_BRANCH +} + +# git update using reference as a branch. +# git_update_remote_branch ref +function git_update_remote_branch() { + + GIT_BRANCH=$1 + + git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH +} + +# git update using reference as a tag. 
Be careful editing source at that repo +# as working copy will be in a detached mode +# git_update_tag ref +function git_update_tag() { + + GIT_TAG=$1 + + git tag -d $GIT_TAG + # fetching given tag only + git fetch origin tag $GIT_TAG + git checkout -f $GIT_TAG +} + + +# OpenStack Functions +# =================== + +# Get the default value for HOST_IP +# get_default_host_ip fixed_range floating_range host_ip_iface host_ip +function get_default_host_ip() { + local fixed_range=$1 + local floating_range=$2 + local host_ip_iface=$3 + local host_ip=$4 + + # Find the interface used for the default route + host_ip_iface=${host_ip_iface:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)} + # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable + if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then + host_ip="" + host_ips=`LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/"); print parts[1]}'` + for IP in $host_ips; do + # Attempt to filter out IP addresses that are part of the fixed and + # floating range. Note that this method only works if the ``netaddr`` + # python library is installed. If it is not installed, an error + # will be printed and the first IP from the interface will be used. + # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct + # address. + if ! (address_in_net $IP $fixed_range || address_in_net $IP $floating_range); then + host_ip=$IP + break; + fi + done + fi + echo $host_ip +} + +# Grab a numbered field from python prettytable output +# Fields are numbered starting with 1 +# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc. 
+# get_field field-number +function get_field() { + while read data; do + if [ "$1" -lt 0 ]; then + field="(\$(NF$1))" + else + field="\$$(($1 + 1))" + fi + echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}" + done +} + +# Add a policy to a policy.json file +# Do nothing if the policy already exists +# ``policy_add policy_file policy_name policy_permissions`` +function policy_add() { + local policy_file=$1 + local policy_name=$2 + local policy_perm=$3 + + if grep -q ${policy_name} ${policy_file}; then + echo "Policy ${policy_name} already exists in ${policy_file}" + return + fi + + # Add a terminating comma to policy lines without one + # Remove the closing '}' and all lines following to the end-of-file + local tmpfile=$(mktemp) + uniq ${policy_file} | sed -e ' + s/]$/],/ + /^[}]/,$d + ' > ${tmpfile} + + # Append policy and closing brace + echo " \"${policy_name}\": ${policy_perm}" >>${tmpfile} + echo "}" >>${tmpfile} + + mv ${tmpfile} ${policy_file} +} + + +# Package Functions +# ================= + +# _get_package_dir +function _get_package_dir() { + local pkg_dir + if is_ubuntu; then + pkg_dir=$FILES/apts + elif is_fedora; then + pkg_dir=$FILES/rpms + elif is_suse; then + pkg_dir=$FILES/rpms-suse + else + exit_distro_not_supported "list of packages" + fi + echo "$pkg_dir" +} + +# Wrapper for ``apt-get`` to set cache and proxy environment variables +# Uses globals ``OFFLINE``, ``*_proxy`` +# apt_get operation package [package ...] +function apt_get() { + [[ "$OFFLINE" = "True" || -z "$@" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + $sudo DEBIAN_FRONTEND=noninteractive \ + http_proxy=$http_proxy https_proxy=$https_proxy \ + no_proxy=$no_proxy \ + apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" +} + +# get_packages() collects a list of package names of any type from the +# prerequisite files in ``files/{apts|rpms}``. The list is intended +# to be passed to a package installer such as apt or yum. 
+# +# Only packages required for the services in 1st argument will be +# included. Two bits of metadata are recognized in the prerequisite files: +# +# - ``# NOPRIME`` defers installation to be performed later in `stack.sh` +# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection +# of the package to the distros listed. The distro names are case insensitive. +function get_packages() { + local services=$@ + local package_dir=$(_get_package_dir) + local file_to_parse + local service + + if [[ -z "$package_dir" ]]; then + echo "No package directory supplied" + return 1 + fi + if [[ -z "$DISTRO" ]]; then + GetDistro + fi + for service in ${services//,/ }; do + # Allow individual services to specify dependencies + if [[ -e ${package_dir}/${service} ]]; then + file_to_parse="${file_to_parse} $service" + fi + # NOTE(sdague) n-api needs glance for now because that's where + # glance client is + if [[ $service == n-api ]]; then + if [[ ! $file_to_parse =~ nova ]]; then + file_to_parse="${file_to_parse} nova" + fi + if [[ ! $file_to_parse =~ glance ]]; then + file_to_parse="${file_to_parse} glance" + fi + elif [[ $service == c-* ]]; then + if [[ ! $file_to_parse =~ cinder ]]; then + file_to_parse="${file_to_parse} cinder" + fi + elif [[ $service == ceilometer-* ]]; then + if [[ ! $file_to_parse =~ ceilometer ]]; then + file_to_parse="${file_to_parse} ceilometer" + fi + elif [[ $service == s-* ]]; then + if [[ ! $file_to_parse =~ swift ]]; then + file_to_parse="${file_to_parse} swift" + fi + elif [[ $service == n-* ]]; then + if [[ ! $file_to_parse =~ nova ]]; then + file_to_parse="${file_to_parse} nova" + fi + elif [[ $service == g-* ]]; then + if [[ ! $file_to_parse =~ glance ]]; then + file_to_parse="${file_to_parse} glance" + fi + elif [[ $service == key* ]]; then + if [[ ! $file_to_parse =~ keystone ]]; then + file_to_parse="${file_to_parse} keystone" + fi + elif [[ $service == q-* ]]; then + if [[ ! 
$file_to_parse =~ neutron ]]; then + file_to_parse="${file_to_parse} neutron" + fi + fi + done + + for file in ${file_to_parse}; do + local fname=${package_dir}/${file} + local OIFS line package distros distro + [[ -e $fname ]] || continue + + OIFS=$IFS + IFS=$'\n' + for line in $(<${fname}); do + if [[ $line =~ "NOPRIME" ]]; then + continue + fi + + # Assume we want this package + package=${line%#*} + inst_pkg=1 + + # Look for # dist:xxx in comment + if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then + # We are using BASH regexp matching feature. + package=${BASH_REMATCH[1]} + distros=${BASH_REMATCH[2]} + # In bash ${VAR,,} will lowecase VAR + # Look for a match in the distro list + if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then + # If no match then skip this package + inst_pkg=0 + fi + fi + + # Look for # testonly in comment + if [[ $line =~ (.*)#.*testonly.* ]]; then + package=${BASH_REMATCH[1]} + # Are we installing test packages? (test for the default value) + if [[ $INSTALL_TESTONLY_PACKAGES = "False" ]]; then + # If not installing test packages the skip this package + inst_pkg=0 + fi + fi + + if [[ $inst_pkg = 1 ]]; then + echo $package + fi + done + IFS=$OIFS + done +} + +# Distro-agnostic package installer +# install_package package [package ...] +function install_package() { + if is_ubuntu; then + [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update + NO_UPDATE_REPOS=True + + apt_get install "$@" + elif is_fedora; then + yum_install "$@" + elif is_suse; then + zypper_install "$@" + else + exit_distro_not_supported "installing packages" + fi +} + +# Distro-agnostic function to tell if a package is installed +# is_package_installed package [package ...] 
+function is_package_installed() { + if [[ -z "$@" ]]; then + return 1 + fi + + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + + if [[ "$os_PACKAGE" = "deb" ]]; then + dpkg -s "$@" > /dev/null 2> /dev/null + elif [[ "$os_PACKAGE" = "rpm" ]]; then + rpm --quiet -q "$@" + else + exit_distro_not_supported "finding if a package is installed" + fi +} + +# Distro-agnostic package uninstaller +# uninstall_package package [package ...] +function uninstall_package() { + if is_ubuntu; then + apt_get purge "$@" + elif is_fedora; then + sudo yum remove -y "$@" + elif is_suse; then + sudo zypper rm "$@" + else + exit_distro_not_supported "uninstalling packages" + fi +} + +# Wrapper for ``yum`` to set proxy environment variables +# Uses globals ``OFFLINE``, ``*_proxy`` +# yum_install package [package ...] +function yum_install() { + [[ "$OFFLINE" = "True" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ + no_proxy=$no_proxy \ + yum install -y "$@" +} + +# zypper wrapper to set arguments correctly +# zypper_install package [package ...] +function zypper_install() { + [[ "$OFFLINE" = "True" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ + zypper --non-interactive install --auto-agree-with-licenses "$@" +} + + +# Process Functions +# ================= + +# _run_process() is designed to be backgrounded by run_process() to simulate a +# fork. It includes the dirty work of closing extra filehandles and preparing log +# files to produce the same logs as screen_it(). 
The log filename is derived +# from the service name and global-and-now-misnamed SCREEN_LOGDIR +# _run_process service "command-line" +function _run_process() { + local service=$1 + local command="$2" + + # Undo logging redirections and close the extra descriptors + exec 1>&3 + exec 2>&3 + exec 3>&- + exec 6>&- + + if [[ -n ${SCREEN_LOGDIR} ]]; then + exec 1>&${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log 2>&1 + ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log + + # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs. + export PYTHONUNBUFFERED=1 + fi + + exec /bin/bash -c "$command" + die "$service exec failure: $command" +} + +# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``. +# This is used for ``service_check`` when all the ``screen_it`` are called finished +# init_service_check +function init_service_check() { + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + + if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then + mkdir -p "$SERVICE_DIR/$SCREEN_NAME" + fi + + rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure +} + +# Find out if a process exists by partial name. +# is_running name +function is_running() { + local name=$1 + ps auxw | grep -v grep | grep ${name} > /dev/null + RC=$? + # some times I really hate bash reverse binary logic + return $RC +} + +# run_process() launches a child process that closes all file descriptors and +# then exec's the passed in command. This is meant to duplicate the semantics +# of screen_it() without screen. PIDs are written to +# $SERVICE_DIR/$SCREEN_NAME/$service.pid +# run_process service "command-line" +function run_process() { + local service=$1 + local command="$2" + + # Spawn the child process + _run_process "$service" "$command" & + echo $! 
+} + +# Helper to launch a service in a named screen +# screen_it service "command-line" +function screen_it { + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + USE_SCREEN=$(trueorfalse True $USE_SCREEN) + + if is_service_enabled $1; then + # Append the service to the screen rc file + screen_rc "$1" "$2" + + if [[ "$USE_SCREEN" = "True" ]]; then + screen -S $SCREEN_NAME -X screen -t $1 + + if [[ -n ${SCREEN_LOGDIR} ]]; then + screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log + screen -S $SCREEN_NAME -p $1 -X log on + ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log + fi + + # sleep to allow bash to be ready to be send the command - we are + # creating a new window in screen and then sends characters, so if + # bash isn't running by the time we send the command, nothing happens + sleep 1.5 + + NL=`echo -ne '\015'` + # This fun command does the following: + # - the passed server command is backgrounded + # - the pid of the background process is saved in the usual place + # - the server process is brought back to the foreground + # - if the server process exits prematurely the fg command errors + # and a message is written to stdout and the service failure file + # The pid saved can be used in screen_stop() as a process group + # id to kill off all child processes + screen -S $SCREEN_NAME -p $1 -X stuff "$2 & echo \$! >$SERVICE_DIR/$SCREEN_NAME/$1.pid; fg || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" + else + # Spawn directly without screen + run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid + fi + fi +} + +# Screen rc file builder +# screen_rc service "command-line" +function screen_rc { + SCREEN_NAME=${SCREEN_NAME:-stack} + SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc + if [[ ! 
-e $SCREENRC ]]; then + # Name the screen session + echo "sessionname $SCREEN_NAME" > $SCREENRC + # Set a reasonable statusbar + echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC + # Some distributions override PROMPT_COMMAND for the screen terminal type - turn that off + echo "setenv PROMPT_COMMAND /bin/true" >> $SCREENRC + echo "screen -t shell bash" >> $SCREENRC + fi + # If this service doesn't already exist in the screenrc file + if ! grep $1 $SCREENRC 2>&1 > /dev/null; then + NL=`echo -ne '\015'` + echo "screen -t $1 bash" >> $SCREENRC + echo "stuff \"$2$NL\"" >> $SCREENRC + + if [[ -n ${SCREEN_LOGDIR} ]]; then + echo "logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log" >>$SCREENRC + echo "log on" >>$SCREENRC + fi + fi +} + +# Stop a service in screen +# If a PID is available use it, kill the whole process group via TERM +# If screen is being used kill the screen window; this will catch processes +# that did not leave a PID behind +# screen_stop service +function screen_stop() { + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + USE_SCREEN=$(trueorfalse True $USE_SCREEN) + + if is_service_enabled $1; then + # Kill via pid if we have one available + if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then + pkill -TERM -P -$(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid) + rm $SERVICE_DIR/$SCREEN_NAME/$1.pid + fi + if [[ "$USE_SCREEN" = "True" ]]; then + # Clean up the screen window + screen -S $SCREEN_NAME -p $1 -X kill + fi + fi +} + +# Helper to get the status of each running service +# service_check +function service_check() { + local service + local failures + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + + + if [[ ! 
-d "$SERVICE_DIR/$SCREEN_NAME" ]]; then + echo "No service status directory found" + return + fi + + # Check if there is any falure flag file under $SERVICE_DIR/$SCREEN_NAME + failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null` + + for service in $failures; do + service=`basename $service` + service=${service%.failure} + echo "Error: Service $service is not running" + done + + if [ -n "$failures" ]; then + echo "More details about the above errors can be found with screen, with ./rejoin-stack.sh" + fi +} + + +# Python Functions +# ================ + +# Get the path to the pip command. +# get_pip_command +function get_pip_command() { + which pip || which pip-python + + if [ $? -ne 0 ]; then + die $LINENO "Unable to find pip; cannot continue" + fi +} + +# Get the path to the direcotry where python executables are installed. +# get_python_exec_prefix +function get_python_exec_prefix() { + if is_fedora || is_suse; then + echo "/usr/bin" + else + echo "/usr/local/bin" + fi +} + +# Wrapper for ``pip install`` to set cache and proxy environment variables +# Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``PIP_USE_MIRRORS``, +# ``TRACK_DEPENDS``, ``*_proxy`` +# pip_install package [package ...] +function pip_install { + [[ "$OFFLINE" = "True" || -z "$@" ]] && return + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + if [[ $TRACK_DEPENDS = True ]]; then + source $DEST/.venv/bin/activate + CMD_PIP=$DEST/.venv/bin/pip + SUDO_PIP="env" + else + SUDO_PIP="sudo" + CMD_PIP=$(get_pip_command) + fi + + # Mirror option not needed anymore because pypi has CDN available, + # but it's useful in certain circumstances + PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False} + if [[ "$PIP_USE_MIRRORS" != "False" ]]; then + PIP_MIRROR_OPT="--use-mirrors" + fi + + # pip < 1.4 has a bug where it will use an already existing build + # directory unconditionally. Say an earlier component installs + # foo v1.1; pip will have built foo's source in + # /tmp/$USER-pip-build. 
Even if a later component specifies foo < + # 1.1, the existing extracted build will be used and cause + # confusing errors. By creating unique build directories we avoid + # this problem. See https://github.com/pypa/pip/issues/709 + local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX) + + $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \ + HTTP_PROXY=$http_proxy \ + HTTPS_PROXY=$https_proxy \ + NO_PROXY=$no_proxy \ + $CMD_PIP install --build=${pip_build_tmp} \ + $PIP_MIRROR_OPT $@ \ + && $SUDO_PIP rm -rf ${pip_build_tmp} +} + + +# Service Functions +# ================= + +# remove extra commas from the input string (i.e. ``ENABLED_SERVICES``) +# _cleanup_service_list service-list +function _cleanup_service_list () { + echo "$1" | sed -e ' + s/,,/,/g; + s/^,//; + s/,$// + ' +} + +# disable_all_services() removes all current services +# from ``ENABLED_SERVICES`` to reset the configuration +# before a minimal installation +# Uses global ``ENABLED_SERVICES`` +# disable_all_services +function disable_all_services() { + ENABLED_SERVICES="" +} + +# Remove all services starting with '-'. For example, to install all default +# services except rabbit (rabbit) set in ``localrc``: +# ENABLED_SERVICES+=",-rabbit" +# Uses global ``ENABLED_SERVICES`` +# disable_negated_services +function disable_negated_services() { + local tmpsvcs="${ENABLED_SERVICES}" + local service + for service in ${tmpsvcs//,/ }; do + if [[ ${service} == -* ]]; then + tmpsvcs=$(echo ${tmpsvcs}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g") + fi + done + ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") +} + +# disable_service() removes the services passed as argument to the +# ``ENABLED_SERVICES`` list, if they are present. +# +# For example: +# disable_service rabbit +# +# This function does not know about the special cases +# for nova, glance, and neutron built into is_service_enabled(). +# Uses global ``ENABLED_SERVICES`` +# disable_service service [service ...] 
+function disable_service() { + local tmpsvcs=",${ENABLED_SERVICES}," + local service + for service in $@; do + if is_service_enabled $service; then + tmpsvcs=${tmpsvcs//,$service,/,} + fi + done + ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") +} + +# enable_service() adds the services passed as argument to the +# ``ENABLED_SERVICES`` list, if they are not already present. +# +# For example: +# enable_service qpid +# +# This function does not know about the special cases +# for nova, glance, and neutron built into is_service_enabled(). +# Uses global ``ENABLED_SERVICES`` +# enable_service service [service ...] +function enable_service() { + local tmpsvcs="${ENABLED_SERVICES}" + for service in $@; do + if ! is_service_enabled $service; then + tmpsvcs+=",$service" + fi + done + ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") + disable_negated_services +} + +# is_service_enabled() checks if the service(s) specified as arguments are +# enabled by the user in ``ENABLED_SERVICES``. +# +# Multiple services specified as arguments are ``OR``'ed together; the test +# is a short-circuit boolean, i.e it returns on the first match. +# +# There are special cases for some 'catch-all' services:: +# **nova** returns true if any service enabled start with **n-** +# **cinder** returns true if any service enabled start with **c-** +# **ceilometer** returns true if any service enabled start with **ceilometer** +# **glance** returns true if any service enabled start with **g-** +# **neutron** returns true if any service enabled start with **q-** +# **swift** returns true if any service enabled start with **s-** +# **trove** returns true if any service enabled start with **tr-** +# For backward compatibility if we have **swift** in ENABLED_SERVICES all the +# **s-** services will be enabled. This will be deprecated in the future. +# +# Cells within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``. 
+# We also need to make sure to treat **n-cell-region** and **n-cell-child** +# as enabled in this case. +# +# Uses global ``ENABLED_SERVICES`` +# is_service_enabled service [service ...] +function is_service_enabled() { + services=$@ + for service in ${services}; do + [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 + + # Look for top-level 'enabled' function for this service + if type is_${service}_enabled >/dev/null 2>&1; then + # A function exists for this service, use it + is_${service}_enabled + return $? + fi + + # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled() + # are implemented + + [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0 + [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0 + [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0 + [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0 + [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 + [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && return 0 + [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 + [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0 + [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0 + [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0 + done + return 1 +} + +# Toggle enable/disable_service for services that must run exclusive of each other +# $1 The name of a variable containing a space-separated list of services +# $2 The name of a variable in which to store the enabled service's name +# $3 The name of the service to enable +function use_exclusive_service { + local options=${!1} + local selection=$3 + out=$2 + [ -z $selection ] || [[ ! 
"$options" =~ "$selection" ]] && return 1 + for opt in $options;do + [[ "$opt" = "$selection" ]] && enable_service $opt || disable_service $opt + done + eval "$out=$selection" + return 0 +} + + +# System Function +# =============== + +# Only run the command if the target file (the last arg) is not on an +# NFS filesystem. +function _safe_permission_operation() { + local args=( $@ ) + local last + local sudo_cmd + local dir_to_check + + let last="${#args[*]} - 1" + + dir_to_check=${args[$last]} + if [ ! -d "$dir_to_check" ]; then + dir_to_check=`dirname "$dir_to_check"` + fi + + if is_nfs_directory "$dir_to_check" ; then + return 0 + fi + + if [[ $TRACK_DEPENDS = True ]]; then + sudo_cmd="env" + else + sudo_cmd="sudo" + fi + + $sudo_cmd $@ +} + +# Exit 0 if address is in network or 1 if address is not in network +# ip-range is in CIDR notation: 1.2.3.4/20 +# address_in_net ip-address ip-range +function address_in_net() { + local ip=$1 + local range=$2 + local masklen=${range#*/} + local network=$(maskip ${range%/*} $(cidr2netmask $masklen)) + local subnet=$(maskip $ip $(cidr2netmask $masklen)) + [[ $network == $subnet ]] +} + +# Add a user to a group. +# add_user_to_group user group +function add_user_to_group() { + local user=$1 + local group=$2 + + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + # SLE11 and openSUSE 12.2 don't have the usual usermod + if ! 
is_suse || [[ "$os_VENDOR" = "openSUSE" && "$os_RELEASE" != "12.2" ]]; then + sudo usermod -a -G "$group" "$user" + else + sudo usermod -A "$group" "$user" + fi +} + +# Convert CIDR notation to a IPv4 netmask +# cidr2netmask cidr-bits +function cidr2netmask() { + local maskpat="255 255 255 255" + local maskdgt="254 252 248 240 224 192 128" + set -- ${maskpat:0:$(( ($1 / 8) * 4 ))}${maskdgt:$(( (7 - ($1 % 8)) * 4 )):3} + echo ${1-0}.${2-0}.${3-0}.${4-0} +} + +# Gracefully cp only if source file/dir exists +# cp_it source destination +function cp_it { + if [ -e $1 ] || [ -d $1 ]; then + cp -pRL $1 $2 + fi +} + +# HTTP and HTTPS proxy servers are supported via the usual environment variables [1] +# ``http_proxy``, ``https_proxy`` and ``no_proxy``. They can be set in +# ``localrc`` or on the command line if necessary:: +# +# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html +# +# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh + +function export_proxy_variables() { + if [[ -n "$http_proxy" ]]; then + export http_proxy=$http_proxy + fi + if [[ -n "$https_proxy" ]]; then + export https_proxy=$https_proxy + fi + if [[ -n "$no_proxy" ]]; then + export no_proxy=$no_proxy + fi +} + +# Returns true if the directory is on a filesystem mounted via NFS. 
+function is_nfs_directory() { + local mount_type=`stat -f -L -c %T $1` + test "$mount_type" == "nfs" +} + +# Return the network portion of the given IP address using netmask +# netmask is in the traditional dotted-quad format +# maskip ip-address netmask +function maskip() { + local ip=$1 + local mask=$2 + local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}" + local subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.})) + echo $subnet +} + +# Service wrapper to restart services +# restart_service service-name +function restart_service() { + if is_ubuntu; then + sudo /usr/sbin/service $1 restart + else + sudo /sbin/service $1 restart + fi +} + +# Only change permissions of a file or directory if it is not on an +# NFS filesystem. +function safe_chmod() { + _safe_permission_operation chmod $@ +} + +# Only change ownership of a file or directory if it is not on an NFS +# filesystem. +function safe_chown() { + _safe_permission_operation chown $@ +} + +# Service wrapper to start services +# start_service service-name +function start_service() { + if is_ubuntu; then + sudo /usr/sbin/service $1 start + else + sudo /sbin/service $1 start + fi +} + +# Service wrapper to stop services +# stop_service service-name +function stop_service() { + if is_ubuntu; then + sudo /usr/sbin/service $1 stop + else + sudo /sbin/service $1 stop + fi +} + + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: From abc7b1d765665b66a027fe93c841b62e537c7843 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 12 Feb 2014 12:09:22 -0600 Subject: [PATCH 0456/4438] Backport Grenade updates Backport changes made in Grenade's copy of functions since the last sync: * d0654b9,i 4c7726e - get_release_name_from_branch() * 7907766 - edits to install_package() Change-Id: I0714c0b1072f1360c3c08fe24225e65e2a550fad --- functions-common | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 
deletion(-) diff --git a/functions-common b/functions-common index 0cecb0b9fb..d92e39cd91 100644 --- a/functions-common +++ b/functions-common @@ -460,6 +460,17 @@ function is_ubuntu { # Git Functions # ============= +# Returns openstack release name for a given branch name +# ``get_release_name_from_branch branch-name`` +function get_release_name_from_branch(){ + local branch=$1 + if [[ $branch =~ "stable/" ]]; then + echo ${branch#*/} + else + echo "master" + fi +} + # git clone only if directory doesn't exist already. Since ``DEST`` might not # be owned by the installation user, we create the directory and change the # ownership to the proper user. @@ -792,7 +803,9 @@ function get_packages() { # install_package package [package ...] function install_package() { if is_ubuntu; then - [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update + # if there are transient errors pulling the updates, that's fine. It may + # be secondary repositories that we don't really care about. + [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update || /bin/true NO_UPDATE_REPOS=True apt_get install "$@" From 3f918a4541a49cc0d50d2931f8670e6e0074280e Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Tue, 11 Feb 2014 11:47:47 -0700 Subject: [PATCH 0457/4438] Set DEFAULT_IMAGE_NAME for Docker hypervisor This allows the tempest configuration to set the right image for booting docker containers with Nova. Since glance uploads are not yet integrated in devstack, IMAGE_URLS remains empty. 
Change-Id: I5df153cd1d5e1411bb3c11816122ce280148e129 --- lib/nova_plugins/hypervisor-docker | 2 +- stackrc | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index cdd9317761..b5df19db02 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -32,7 +32,7 @@ DOCKER_PID_FILE=/var/run/docker.pid DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042} DOCKER_IMAGE=${DOCKER_IMAGE:-cirros:latest} -DOCKER_IMAGE_NAME=cirros +DOCKER_IMAGE_NAME=$DEFAULT_IMAGE_NAME DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-registry:latest} DOCKER_REGISTRY_IMAGE_NAME=registry DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME} diff --git a/stackrc b/stackrc index 7eed60cb2c..d754f3b074 100644 --- a/stackrc +++ b/stackrc @@ -280,6 +280,9 @@ case "$VIRT_DRIVER" in openvz) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64} IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz"};; + docker) + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros} + IMAGE_URLS=${IMAGE_URLS:-};; libvirt) case "$LIBVIRT_TYPE" in lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc From 1d50d78560910779d28db85591fbb67e1617ff34 Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Wed, 12 Feb 2014 18:23:36 -0500 Subject: [PATCH 0458/4438] Fix service name for marconi This patch updates TEMPEST_SERVICES, to have the same name as devstack marconi service. 
Change-Id: Ibc9b4a66fccd3d95ddd1717bf549476bd843204a Implements: blueprint add-basic-marconi-tests --- lib/marconi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index d1ab5f3a5c..0aaff1bd58 100644 --- a/lib/marconi +++ b/lib/marconi @@ -52,7 +52,7 @@ MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconicli MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master} # Tell Tempest this project is present -TEMPEST_SERVICES+=,marconi +TEMPEST_SERVICES+=,marconi-server # Functions From a42541a9fb00e21b278a06d4034528976cbf8336 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 13 Feb 2014 09:39:15 -0500 Subject: [PATCH 0459/4438] add heat to the default devstack service list heat has been integrated for a while, we should turn it on out of the box. Also refactor the service list to make it simpler to understand what's enabled. Change-Id: I9738f39ce196d5c7f75b0a5b164222ea165fb340 --- stackrc | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 9166a171e1..165196c545 100644 --- a/stackrc +++ b/stackrc @@ -35,7 +35,18 @@ fi # enable_service neutron # # Optional, to enable tempest configuration as part of devstack # enable_service tempest -ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,mysql + +# core compute (glance / keystone / nova (+ nova-network)) +ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,n-sch,n-novnc,n-xvnc,n-cauth +# cinder +ENABLED_SERVICES+=,c-sch,c-api,c-vol +# heat +ENABLED_SERVICES+=,h-eng,h-api,h-api-cfn,h-api-cw +# dashboard +ENABLED_SERVICES+=,horizon +# additional services +ENABLED_SERVICES+=,rabbit,tempest,mysql + # Tell Tempest which services are available. The default is set here as # Tempest falls late in the configuration sequence. 
This differs from From 1bcd2800271d6a72237084ad7f36f84072eecd18 Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Thu, 13 Feb 2014 15:14:41 +0000 Subject: [PATCH 0460/4438] Don't warn about heat modifying flavors Since de0898a Heat no longer modifies flavors, so the comment and output related to modified flavors is no longer needed. Change-Id: I1007d2ab3387f28b8d7487f450cab4592f2824aa --- stack.sh | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/stack.sh b/stack.sh index e45707b781..1dc4b74ab3 100755 --- a/stack.sh +++ b/stack.sh @@ -1181,7 +1181,7 @@ fi # Configure and launch heat engine, api and metadata if is_service_enabled heat; then - # Initialize heat, including replacing nova flavors + # Initialize heat echo_summary "Configuring Heat" init_heat echo_summary "Starting Heat" @@ -1350,11 +1350,6 @@ if is_service_enabled horizon; then echo "Horizon is now available at http://$SERVICE_HOST/" fi -# Warn that the default flavors have been changed by Heat -if is_service_enabled heat; then - echo "Heat has replaced the default flavors. View by running: nova flavor-list" -fi - # If Keystone is present you can point ``nova`` cli to this server if is_service_enabled key; then echo "Keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/" From 4074e298112ba179ba743982c6904c8bd70030b2 Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Fri, 14 Feb 2014 00:54:58 +0900 Subject: [PATCH 0461/4438] Use lowercase section name in Neutron ML2 security group config All other security group configs in Neutron are lower-case and it should be consistent. 
Change-Id: I683333c1e186446a69172446cca6d9b952673ed4 Closes-Bug: #1279862 --- lib/neutron_plugins/ml2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index ab4e3474a6..4ceabe765d 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -93,9 +93,9 @@ function neutron_plugin_configure_service() { # instead use its own config variable to indicate whether security # groups is enabled, and that will need to be set here instead. if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver neutron.agent.not.a.real.FirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.not.a.real.FirewallDriver else - iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver neutron.agent.firewall.NoopFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver fi # Since we enable the tunnel TypeDrivers, also enable a local_ip From 22dece0d969b476cf187fe7359fa38d96189cdc1 Mon Sep 17 00:00:00 2001 From: John Eckersberg Date: Thu, 13 Feb 2014 16:21:24 -0500 Subject: [PATCH 0462/4438] Add Fedora support to install_docker.sh On Fedora, just install the docker-io package as supplied in the Fedora repository. 
Change-Id: Iea74878d3e1c434863c188ea2253817384e56bf4 --- tools/docker/install_docker.sh | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh index b9e1b242dd..27c8c8210b 100755 --- a/tools/docker/install_docker.sh +++ b/tools/docker/install_docker.sh @@ -30,15 +30,19 @@ SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} # Install Docker Service # ====================== -# Stop the auto-repo updates and do it when required here -NO_UPDATE_REPOS=True - -# Set up home repo -curl https://get.docker.io/gpg | sudo apt-key add - -install_package python-software-properties && \ - sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list" -apt_get update -install_package --force-yes lxc-docker socat +if is_fedora; then + install_package docker-io socat +else + # Stop the auto-repo updates and do it when required here + NO_UPDATE_REPOS=True + + # Set up home repo + curl https://get.docker.io/gpg | sudo apt-key add - + install_package python-software-properties && \ + sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list" + apt_get update + install_package --force-yes lxc-docker socat +fi # Start the daemon - restart just in case the package ever auto-starts... restart_service docker From d6997d317685353482a0aa7a18408c1313583460 Mon Sep 17 00:00:00 2001 From: John Griffith Date: Thu, 13 Feb 2014 22:56:29 +0000 Subject: [PATCH 0463/4438] Fix tee statement for catching tempest output The use of exec and tee doesn't seem to be quite right, and was unreliable in terms of catching the exit status of the tempest test as well as not catching the output when things went wrong. This changes the way we do the redirect and the tee to something that should be more robust and seems to work reliably in testing. 
Change-Id: Ieb9d725839fb8e3f9e2e63a2b7b2e9c7c86713a2 --- driver_certs/cinder_driver_cert.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh index edcc6d4800..8380deea42 100755 --- a/driver_certs/cinder_driver_cert.sh +++ b/driver_certs/cinder_driver_cert.sh @@ -89,9 +89,8 @@ start_cinder sleep 5 # run tempest api/volume/test_* -log_message "Run the actual tempest volume tests (./tools/pretty_tox.sh api.volume_*)...", True -exec 2> >(tee -a $TEMPFILE) -`./tools/pretty_tox.sh api.volume` +log_message "Run the actual tempest volume tests (./tools/pretty_tox.sh api.volume)...", True +./tools/pretty_tox.sh api.volume 2>&1 | tee -a $TEMPFILE if [[ $? = 0 ]]; then log_message "CONGRATULATIONS!!! Device driver PASSED!", True log_message "Submit output: ($TEMPFILE)" From 0b3aacc707ab8b3593285e02dc172b3c96730efc Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Thu, 13 Feb 2014 18:18:51 -0500 Subject: [PATCH 0464/4438] Fix MARCONI_USER This patch fixes the MARCONI_USER in create_marconi_accounts(). 
Change-Id: I9618530fa20ee84d25646107c7450017ada908df --- lib/marconi | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index 0aaff1bd58..e400419d57 100644 --- a/lib/marconi +++ b/lib/marconi @@ -154,10 +154,12 @@ function create_marconi_accounts() { MARCONI_USER=$(get_id keystone user-create --name=marconi \ --pass="$SERVICE_PASSWORD" \ --tenant-id $SERVICE_TENANT \ - --email=marconi@example.com) + --email=marconi@example.com \ + | grep " id " | get_field 2) keystone user-role-add --tenant-id $SERVICE_TENANT \ --user-id $MARCONI_USER \ --role-id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then MARCONI_SERVICE=$(keystone service-create \ --name=marconi \ From 16d3ad057dc0b982c801fcfa9d5497c1daeb34cd Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Thu, 13 Feb 2014 18:59:50 -0600 Subject: [PATCH 0465/4438] Use database connection for keystone The keystone configuration used the 'connection' option in the 'sql' section of the keystone.conf file. This option is deprecated in favor of 'connection' in the 'database' section. The keystone setup code is changed to use the option in the new section rather than the deprecated one. 
Change-Id: I62fd2f50ded3b8848e9e5225e88c80ed8fed3bff --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 4f7f68b57f..5e2e88d33f 100644 --- a/lib/keystone +++ b/lib/keystone @@ -201,7 +201,7 @@ function configure_keystone() { iniset $KEYSTONE_CONF token provider keystone.token.providers.uuid.Provider fi - iniset $KEYSTONE_CONF sql connection `database_connection_url keystone` + iniset $KEYSTONE_CONF database connection `database_connection_url keystone` iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2" if [[ "$KEYSTONE_TOKEN_BACKEND" = "sql" ]]; then From 41d96d7b4837c6bafc2622954a3c6c1fdcc13a82 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 11 Feb 2014 09:08:35 -0600 Subject: [PATCH 0466/4438] Marconi fixes for precise Fix a couple of problems found on Ubuntu: * $MARCONI_DIR/etc/marconi/policy.json doesn't exist; removed the copy to /etc/marconi * added a seting of nssize in /etc/mongodb.conf for Ubuntu * restart the correct serice name on Ubuntu Change-Id: I9bd2ab1aa4fb94ff96559e069e5b62138c358fb5 --- lib/marconi | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/lib/marconi b/lib/marconi index 1eaebbdf16..3d5ef82471 100644 --- a/lib/marconi +++ b/lib/marconi @@ -82,10 +82,6 @@ function configure_marconi() { iniset $MARCONI_CONF DEFAULT verbose True iniset $MARCONI_CONF 'drivers:transport:wsgi' bind '0.0.0.0' - # Install the policy file for the API server - cp $MARCONI_DIR/etc/marconi/policy.json $MARCONI_CONF_DIR - iniset $MARCONI_CONF DEFAULT policy_file $MARCONI_CONF_DIR/policy.json - iniset $MARCONI_CONF keystone_authtoken auth_protocol http iniset $MARCONI_CONF keystone_authtoken admin_user marconi iniset $MARCONI_CONF keystone_authtoken admin_password $SERVICE_PASSWORD @@ -102,9 +98,16 @@ function configure_marconi() { function configure_mongodb() { # Set nssize to 2GB. 
This increases the number of namespaces supported # # per database. - sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod - - restart_service mongod + if is_ubuntu; then + sudo sed -i -e " + s|[^ \t]*#[ \t]*\(nssize[ \t]*=.*\$\)|\1| + s|^\(nssize[ \t]*=[ \t]*\).*\$|\1 2047| + " /etc/mongodb.conf + restart_service mongodb + elif is_fedora; then + sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod + restart_service mongod + fi } # init_marconi() - Initialize etc. From 1e4e3acaadc1397a7d69a83e8fe9a54dd879983a Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Fri, 14 Feb 2014 11:29:26 -0500 Subject: [PATCH 0467/4438] Revert the tempest service name to marconi This patch is to rollback the change introduced by https://review.openstack.org/#/c/73100/. 73100 is no longer needed because of the recent https://review.openstack.org/#/c/69497/. Using 'marconi' as the service name will keep us aligned with the naming convention used by other projects. Change-Id: I5da6d2aaeb5c9dc29a1cbc70c8425449807eb34c --- lib/marconi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index 0aaff1bd58..d1ab5f3a5c 100644 --- a/lib/marconi +++ b/lib/marconi @@ -52,7 +52,7 @@ MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconicli MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master} # Tell Tempest this project is present -TEMPEST_SERVICES+=,marconi-server +TEMPEST_SERVICES+=,marconi # Functions From 5705db691386809e288758a0314dfa60d9b36da7 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Sat, 1 Feb 2014 20:06:42 -0500 Subject: [PATCH 0468/4438] Optionally enable file injection There is a patch up for nova right now that disables file injection by default. This is a corresponding devstack change that only sets file injection options if it is enabled in the devstack config. This is good to keep around so that we can easily turn it on for testing. 
The nova change is id Icff1304fc816acc843f8962727aef8bbbc7bbaa3. Change-Id: I5015f2c351b1d680c205d7f9a5204febca490b91 --- lib/nova | 6 ------ lib/nova_plugins/hypervisor-libvirt | 16 ++++++++++++++++ 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/lib/nova b/lib/nova index eaaaa6210c..d90aea7108 100644 --- a/lib/nova +++ b/lib/nova @@ -513,12 +513,6 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" iniset_rpc_backend nova $NOVA_CONF DEFAULT iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" - - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - # File injection is being disabled by default in the near future - - # disable it here for now to avoid surprises later. - iniset $NOVA_CONF libvirt inject_partition '-2' - fi } function init_nova_cells() { diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 42d3af15cf..415244ffae 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -23,6 +23,9 @@ set +o xtrace # Defaults # -------- +# File injection is disabled by default in Nova. This will turn it back on. +ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False} + # Entry Points # ------------ @@ -116,6 +119,19 @@ EOF" if is_arch "ppc64"; then iniset $NOVA_CONF DEFAULT vnc_enabled "false" fi + + ENABLE_FILE_INJECTION=$(trueorfalse False $ENABLE_FILE_INJECTION) + if [[ "$ENABLE_FILE_INJECTION" = "True" ]] ; then + # When libguestfs is available for file injection, enable using + # libguestfs to inspect the image and figure out the proper + # partition to inject into. + iniset $NOVA_CONF libvirt inject_partition '-1' + iniset $NOVA_CONF libvirt inject_key 'true' + else + # File injection is being disabled by default in the near future - + # disable it here for now to avoid surprises later. 
+ iniset $NOVA_CONF libvirt inject_partition '-2' + fi } # install_nova_hypervisor() - Install external components From 19685428e3d3e51ff88aa5254f7c27d476053798 Mon Sep 17 00:00:00 2001 From: Steve Martinelli Date: Fri, 24 Jan 2014 13:02:26 -0600 Subject: [PATCH 0469/4438] Change most keystoneclient commands to openstacklient in libs migrated most keystoneclient commands from the following libs: ceilometer cinder ironic keystone marconi neutron nova savanna swift trove Also need to set and unset openstackclient specific environment variables from stack.sh Change-Id: I725f30bc08e1df5a4c5770576c19ad1ddaeb843a --- lib/ceilometer | 36 +++++++++++++-------------- lib/cinder | 39 ++++++++++++++--------------- lib/ironic | 30 +++++++++++----------- lib/keystone | 67 ++++++++++++++++++++++++++++---------------------- lib/marconi | 32 +++++++++++++----------- lib/neutron | 32 ++++++++++++------------ lib/nova | 38 ++++++++++++++-------------- lib/savanna | 32 ++++++++++++------------ lib/swift | 50 +++++++++++++++++++++++-------------- lib/trove | 29 +++++++++++----------- stack.sh | 4 +++ 11 files changed, 209 insertions(+), 180 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 4ca77bb72b..6c87d03b13 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -71,33 +71,33 @@ function is_ceilometer_enabled { create_ceilometer_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") # Ceilometer if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then - CEILOMETER_USER=$(keystone user-create \ - --name=ceilometer \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=ceilometer@example.com \ + CEILOMETER_USER=$(openstack user create \ + ceilometer \ + --password "$SERVICE_PASSWORD" \ + 
--project $SERVICE_TENANT \ + --email ceilometer@example.com \ | grep " id " | get_field 2) - keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $CEILOMETER_USER \ - --role-id $ADMIN_ROLE + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $CEILOMETER_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - CEILOMETER_SERVICE=$(keystone service-create \ - --name=ceilometer \ + CEILOMETER_SERVICE=$(openstack service create \ + ceilometer \ --type=metering \ --description="OpenStack Telemetry Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $CEILOMETER_SERVICE \ --region RegionOne \ - --service_id $CEILOMETER_SERVICE \ - --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \ - --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \ - --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" + --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ + --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ + --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" fi fi } diff --git a/lib/cinder b/lib/cinder index d5e78bb39c..c8c90c098d 100644 --- a/lib/cinder +++ b/lib/cinder @@ -330,45 +330,44 @@ function configure_cinder() { # Migrated from keystone_data.sh create_cinder_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") # Cinder if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - CINDER_USER=$(keystone user-create \ - --name=cinder \ - --pass="$SERVICE_PASSWORD" \ 
- --tenant-id $SERVICE_TENANT \ - --email=cinder@example.com \ + CINDER_USER=$(openstack user create \ + cinder \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email cinder@example.com \ | grep " id " | get_field 2) - keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $CINDER_USER \ - --role-id $ADMIN_ROLE + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $CINDER_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - CINDER_SERVICE=$(keystone service-create \ - --name=cinder \ + CINDER_SERVICE=$(openstack service create \ + cinder \ --type=volume \ --description="Cinder Volume Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $CINDER_SERVICE \ --region RegionOne \ - --service_id $CINDER_SERVICE \ --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" - CINDER_V2_SERVICE=$(keystone service-create \ - --name=cinderv2 \ + CINDER_V2_SERVICE=$(openstack service create \ + cinderv2 \ --type=volumev2 \ --description="Cinder Volume Service V2" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $CINDER_V2_SERVICE \ --region RegionOne \ - --service_id $CINDER_V2_SERVICE \ --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \ --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \ --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" - fi fi } diff --git a/lib/ironic b/lib/ironic index 3c0e3cbaf7..607b13125a 100644 --- a/lib/ironic +++ b/lib/ironic @@ -145,30 +145,30 @@ function create_ironic_cache_dir() { # service ironic 
admin # if enabled create_ironic_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") # Ironic if [[ "$ENABLED_SERVICES" =~ "ir-api" ]]; then - IRONIC_USER=$(keystone user-create \ - --name=ironic \ - --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT \ - --email=ironic@example.com \ + IRONIC_USER=$(openstack user create \ + ironic \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email ironic@example.com \ | grep " id " | get_field 2) - keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user_id $IRONIC_USER \ - --role_id $ADMIN_ROLE + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $IRONIC_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - IRONIC_SERVICE=$(keystone service-create \ - --name=ironic \ + IRONIC_SERVICE=$(openstack service create \ + ironic \ --type=baremetal \ --description="Ironic baremetal provisioning service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $IRONIC_SERVICE \ --region RegionOne \ - --service_id $IRONIC_SERVICE \ --publicurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \ --adminurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \ --internalurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" diff --git a/lib/keystone b/lib/keystone index 4f7f68b57f..bf0dcbb1bb 100644 --- a/lib/keystone +++ b/lib/keystone @@ -275,60 +275,69 @@ function configure_keystone() { create_keystone_accounts() { # admin - ADMIN_TENANT=$(keystone tenant-create \ - --name admin \ + ADMIN_TENANT=$(openstack project create \ + admin \ | grep " id " | get_field 2) - ADMIN_USER=$(keystone user-create \ - --name admin \ - --pass "$ADMIN_PASSWORD" \ + ADMIN_USER=$(openstack user create 
\ + admin \ + --project "$ADMIN_TENANT" \ --email admin@example.com \ + --password "$ADMIN_PASSWORD" \ | grep " id " | get_field 2) - ADMIN_ROLE=$(keystone role-create \ - --name admin \ + ADMIN_ROLE=$(openstack role create \ + admin \ | grep " id " | get_field 2) - keystone user-role-add \ - --user-id $ADMIN_USER \ - --role-id $ADMIN_ROLE \ - --tenant-id $ADMIN_TENANT + openstack role add \ + $ADMIN_ROLE \ + --project $ADMIN_TENANT \ + --user $ADMIN_USER # service - SERVICE_TENANT=$(keystone tenant-create \ - --name $SERVICE_TENANT_NAME \ + SERVICE_TENANT=$(openstack project create \ + $SERVICE_TENANT_NAME \ | grep " id " | get_field 2) # The Member role is used by Horizon and Swift so we need to keep it: - MEMBER_ROLE=$(keystone role-create --name=Member | grep " id " | get_field 2) + MEMBER_ROLE=$(openstack role create \ + Member \ + | grep " id " | get_field 2) # ANOTHER_ROLE demonstrates that an arbitrary role may be created and used # TODO(sleepsonthefloor): show how this can be used for rbac in the future! 
- ANOTHER_ROLE=$(keystone role-create --name=anotherrole | grep " id " | get_field 2) + ANOTHER_ROLE=$(openstack role create \ + anotherrole \ + | grep " id " | get_field 2) # invisible tenant - admin can't see this one - INVIS_TENANT=$(keystone tenant-create --name=invisible_to_admin | grep " id " | get_field 2) + INVIS_TENANT=$(openstack project create \ + invisible_to_admin \ + | grep " id " | get_field 2) # demo - DEMO_TENANT=$(keystone tenant-create \ - --name=demo \ + DEMO_TENANT=$(openstack project create \ + demo \ | grep " id " | get_field 2) - DEMO_USER=$(keystone user-create \ - --name demo \ - --pass "$ADMIN_PASSWORD" \ + DEMO_USER=$(openstack user create \ + demo \ + --project $DEMO_TENANT \ --email demo@example.com \ + --password "$ADMIN_PASSWORD" \ | grep " id " | get_field 2) - keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $DEMO_TENANT - keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $DEMO_TENANT - keystone user-role-add --user-id $DEMO_USER --role-id $ANOTHER_ROLE --tenant-id $DEMO_TENANT - keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $INVIS_TENANT + + openstack role add --project $DEMO_TENANT --user $DEMO_USER $MEMBER_ROLE + openstack role add --project $DEMO_TENANT --user $ADMIN_USER $ADMIN_ROLE + openstack role add --project $DEMO_TENANT --user $DEMO_USER $ANOTHER_ROLE + openstack role add --project $INVIS_TENANT --user $DEMO_USER $MEMBER_ROLE # Keystone if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - KEYSTONE_SERVICE=$(keystone service-create \ - --name keystone \ + KEYSTONE_SERVICE=$(openstack service create \ + keystone \ --type identity \ --description "Keystone Identity Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $KEYSTONE_SERVICE \ --region RegionOne \ - --service_id $KEYSTONE_SERVICE \ --publicurl 
"$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION" \ --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v$IDENTITY_API_VERSION" \ --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION" diff --git a/lib/marconi b/lib/marconi index 7c8fd14255..88312cb1bd 100644 --- a/lib/marconi +++ b/lib/marconi @@ -151,27 +151,29 @@ function stop_marconi() { } function create_marconi_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") - - MARCONI_USER=$(get_id keystone user-create --name=marconi \ - --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT \ - --email=marconi@example.com \ - | grep " id " | get_field 2) - keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $MARCONI_USER \ - --role-id $ADMIN_ROLE + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") + + MARCONI_USER=$(openstack user create \ + marconi \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email marconi@example.com \ + | grep " id " | get_field 2) + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $MARCONI_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - MARCONI_SERVICE=$(keystone service-create \ - --name=marconi \ + MARCONI_SERVICE=$(openstack service create \ + marconi \ --type=queuing \ --description="Marconi Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $MARCONI_SERVICE \ --region RegionOne \ - --service_id $MARCONI_SERVICE \ --publicurl "http://$SERVICE_HOST:8888" \ --adminurl "http://$SERVICE_HOST:8888" \ --internalurl "http://$SERVICE_HOST:8888" diff --git a/lib/neutron b/lib/neutron index 5bd38bcf73..df276c71d5 100644 --- 
a/lib/neutron +++ b/lib/neutron @@ -332,29 +332,29 @@ function create_neutron_cache_dir() { # Migrated from keystone_data.sh function create_neutron_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - NEUTRON_USER=$(keystone user-create \ - --name=neutron \ - --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT \ - --email=neutron@example.com \ + NEUTRON_USER=$(openstack user create \ + neutron \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email neutron@example.com \ | grep " id " | get_field 2) - keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $NEUTRON_USER \ - --role-id $ADMIN_ROLE + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $NEUTRON_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - NEUTRON_SERVICE=$(keystone service-create \ - --name=neutron \ + NEUTRON_SERVICE=$(openstack service create \ + neutron \ --type=network \ --description="Neutron Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $NEUTRON_SERVICE \ --region RegionOne \ - --service_id $NEUTRON_SERVICE \ --publicurl "http://$SERVICE_HOST:9696/" \ --adminurl "http://$SERVICE_HOST:9696/" \ --internalurl "http://$SERVICE_HOST:9696/" @@ -363,7 +363,7 @@ function create_neutron_accounts() { } function create_neutron_initial_network() { - TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1) + TENANT_ID=$(openstack project list | grep " demo " | get_field 1) die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for demo" # Create a small network diff --git a/lib/nova b/lib/nova index d90aea7108..fefeda1236 100644 --- a/lib/nova +++ 
b/lib/nova @@ -324,41 +324,41 @@ function configure_nova() { # Migrated from keystone_data.sh create_nova_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") # Nova if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - NOVA_USER=$(keystone user-create \ - --name=nova \ - --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT \ - --email=nova@example.com \ + NOVA_USER=$(openstack user create \ + nova \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email nova@example.com \ | grep " id " | get_field 2) - keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $NOVA_USER \ - --role-id $ADMIN_ROLE + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $NOVA_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - NOVA_SERVICE=$(keystone service-create \ - --name=nova \ + NOVA_SERVICE=$(openstack service create \ + nova \ --type=compute \ --description="Nova Compute Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $NOVA_SERVICE \ --region RegionOne \ - --service_id $NOVA_SERVICE \ --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" - NOVA_V3_SERVICE=$(keystone service-create \ - --name=novav3 \ + NOVA_V3_SERVICE=$(openstack service create \ + novav3 \ --type=computev3 \ --description="Nova Compute Service V3" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $NOVA_V3_SERVICE \ --region RegionOne \ - --service_id 
$NOVA_V3_SERVICE \ --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \ --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \ --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" diff --git a/lib/savanna b/lib/savanna index 6f42311971..43c5e386fe 100644 --- a/lib/savanna +++ b/lib/savanna @@ -54,29 +54,29 @@ TEMPEST_SERVICES+=,savanna # service savanna admin function create_savanna_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") - - SAVANNA_USER=$(keystone user-create \ - --name=savanna \ - --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT \ - --email=savanna@example.com \ + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") + + SAVANNA_USER=$(openstack user create \ + savanna \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email savanna@example.com \ | grep " id " | get_field 2) - keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $SAVANNA_USER \ - --role-id $ADMIN_ROLE + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $SAVANNA_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - SAVANNA_SERVICE=$(keystone service-create \ - --name=savanna \ + SAVANNA_SERVICE=$(openstack service create \ + savanna \ --type=data_processing \ --description="Savanna Data Processing" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $SAVANNA_SERVICE \ --region RegionOne \ - --service_id $SAVANNA_SERVICE \ --publicurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ --adminurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ --internalurl 
"$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" diff --git a/lib/swift b/lib/swift index be25c81468..df586abe8b 100644 --- a/lib/swift +++ b/lib/swift @@ -527,39 +527,53 @@ function create_swift_accounts() { KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql} - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") - - SWIFT_USER=$(keystone user-create --name=swift --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT --email=swift@example.com | grep " id " | get_field 2) - keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $SWIFT_USER --role-id $ADMIN_ROLE + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") + + SWIFT_USER=$(openstack user create \ + swift \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email=swift@example.com \ + | grep " id " | get_field 2) + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $SWIFT_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - SWIFT_SERVICE=$(keystone service-create --name=swift --type="object-store" \ - --description="Swift Service" | grep " id " | get_field 2) - keystone endpoint-create \ + SWIFT_SERVICE=$(openstack service create \ + swift \ + --type="object-store" \ + --description="Swift Service" \ + | grep " id " | get_field 2) + openstack endpoint create \ + $SWIFT_SERVICE \ --region RegionOne \ - --service_id $SWIFT_SERVICE \ --publicurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \ --adminurl "http://$SERVICE_HOST:8080" \ --internalurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" fi - SWIFT_TENANT_TEST1=$(keystone tenant-create --name=swifttenanttest1 | grep " id " | get_field 2) + SWIFT_TENANT_TEST1=$(openstack project create swifttenanttest1 | grep " id " | get_field 2) 
die_if_not_set $LINENO SWIFT_TENANT_TEST1 "Failure creating SWIFT_TENANT_TEST1" - SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=$SWIFTUSERTEST1_PASSWORD --email=test@example.com | grep " id " | get_field 2) + SWIFT_USER_TEST1=$(openstack user create swiftusertest1 --password=$SWIFTUSERTEST1_PASSWORD \ + --project "$SWIFT_TENANT_TEST1" --email=test@example.com | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1" - keystone user-role-add --user-id $SWIFT_USER_TEST1 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST1 + openstack role add --user $SWIFT_USER_TEST1 --project $SWIFT_TENANT_TEST1 $ADMIN_ROLE - SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 --pass=$SWIFTUSERTEST3_PASSWORD --email=test3@example.com | grep " id " | get_field 2) + SWIFT_USER_TEST3=$(openstack user create swiftusertest3 --password=$SWIFTUSERTEST3_PASSWORD \ + --project "$SWIFT_TENANT_TEST1" --email=test3@example.com | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_USER_TEST3 "Failure creating SWIFT_USER_TEST3" - keystone user-role-add --user-id $SWIFT_USER_TEST3 --role-id $ANOTHER_ROLE --tenant-id $SWIFT_TENANT_TEST1 + openstack role add --user $SWIFT_USER_TEST3 --project $SWIFT_TENANT_TEST1 $ANOTHER_ROLE - SWIFT_TENANT_TEST2=$(keystone tenant-create --name=swifttenanttest2 | grep " id " | get_field 2) + SWIFT_TENANT_TEST2=$(openstack project create swifttenanttest2 | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_TENANT_TEST2 "Failure creating SWIFT_TENANT_TEST2" - SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=$SWIFTUSERTEST2_PASSWORD --email=test2@example.com | grep " id " | get_field 2) + + SWIFT_USER_TEST2=$(openstack user create swiftusertest2 --password=$SWIFTUSERTEST2_PASSWORD \ + --project "$SWIFT_TENANT_TEST2" --email=test2@example.com | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_USER_TEST2 "Failure creating SWIFT_USER_TEST2" - keystone 
user-role-add --user-id $SWIFT_USER_TEST2 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST2 + openstack role add --user $SWIFT_USER_TEST2 --project $SWIFT_TENANT_TEST2 $ADMIN_ROLE } # init_swift() - Initialize rings diff --git a/lib/trove b/lib/trove index bb4549121d..5e1bbd548d 100644 --- a/lib/trove +++ b/lib/trove @@ -71,28 +71,29 @@ function setup_trove_logging() { create_trove_accounts() { # Trove - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - SERVICE_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + SERVICE_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") if [[ "$ENABLED_SERVICES" =~ "trove" ]]; then - TROVE_USER=$(keystone user-create \ - --name=trove \ - --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT \ - --email=trove@example.com \ + TROVE_USER=$(openstack user create \ + trove \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email trove@example.com \ | grep " id " | get_field 2) - keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $TROVE_USER \ - --role-id $SERVICE_ROLE + openstack role add \ + $SERVICE_ROLE \ + --project $SERVICE_TENANT \ + --user $TROVE_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - TROVE_SERVICE=$(keystone service-create \ - --name=trove \ + TROVE_SERVICE=$(openstack service create + trove \ --type=database \ --description="Trove Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $TROVE_SERVICE \ --region RegionOne \ - --service_id $TROVE_SERVICE \ --publicurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \ --adminurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \ --internalurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" diff --git a/stack.sh b/stack.sh index c153132485..e5d87cca11 100755 --- a/stack.sh +++ b/stack.sh @@ -925,6 +925,9 @@ if is_service_enabled key; 
then # Do the keystone-specific bits from keystone_data.sh export OS_SERVICE_TOKEN=$SERVICE_TOKEN export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT + # Add temporarily to make openstackclient work + export OS_TOKEN=$SERVICE_TOKEN + export OS_URL=$SERVICE_ENDPOINT create_keystone_accounts create_nova_accounts create_cinder_accounts @@ -947,6 +950,7 @@ if is_service_enabled key; then bash -x $FILES/keystone_data.sh # Set up auth creds now that keystone is bootstrapped + unset OS_TOKEN OS_URL export OS_AUTH_URL=$SERVICE_ENDPOINT export OS_TENANT_NAME=admin export OS_USERNAME=admin From 33d1f86a4931de76fba555a9a3f5e5fa3fd7c171 Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Thu, 13 Feb 2014 15:00:33 +0000 Subject: [PATCH 0470/4438] Add support for creating heat stack domain The Heat instance-users blueprint requires an additional domain where heat creates projects and users related to stack resources so add support for creating this domain when configured to install Heat. Note a workaround is currently required to make the openstack command work with the v3 keystone API. 
Change-Id: I36157372d85b577952b55481ca5cc42146011a54 --- lib/heat | 20 ++++++++++++++++++++ stack.sh | 4 ++++ 2 files changed, 24 insertions(+) diff --git a/lib/heat b/lib/heat index 9f5dd8b588..efb01ef3b8 100644 --- a/lib/heat +++ b/lib/heat @@ -110,6 +110,15 @@ function configure_heat() { iniset $HEAT_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 iniset $HEAT_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens + # stack user domain + # Note we have to pass token/endpoint here because the current endpoint and + # version negotiation in OSC means just --os-identity-api-version=3 won't work + KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" + D_ID=$(openstack --os-token $OS_SERVICE_TOKEN --os-url=$KS_ENDPOINT_V3 \ + --os-identity-api-version=3 domain show heat \ + | grep ' id ' | get_field 2) + iniset $HEAT_CONF stack_user_domain ${D_ID} + # paste_deploy [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_CONF paste_deploy flavor standalone @@ -196,6 +205,17 @@ function disk_image_create { upload_image "http://localhost/$output.qcow2" $TOKEN } +# create_heat_accounts() - Set up common required heat accounts +# Note this is in addition to what is in files/keystone_data.sh +function create_heat_accounts() { + # Note we have to pass token/endpoint here because the current endpoint and + # version negotiation in OSC means just --os-identity-api-version=3 won't work + KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" + openstack --os-token $OS_SERVICE_TOKEN --os-url=$KS_ENDPOINT_V3 \ + --os-identity-api-version=3 domain create heat \ + --description "Owns users and projects created by heat" +} + # Restore xtrace $XTRACE diff --git a/stack.sh b/stack.sh index c153132485..824982e4c6 100755 --- a/stack.sh +++ b/stack.sh @@ -938,6 +938,10 @@ if 
is_service_enabled key; then create_swift_accounts fi + if is_service_enabled heat; then + create_heat_accounts + fi + # ``keystone_data.sh`` creates services, admin and demo users, and roles. ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ From 351173624c4a3e24aa479c6ce5f557732bff40e7 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Mon, 17 Feb 2014 18:38:07 +0400 Subject: [PATCH 0471/4438] Improve savanna-dashboard installation * split configurations setting to one-per-line; * don't set SAVANNA_URL in horizon configs - we're now using endpoits keystone to find corresponding edpoint. Change-Id: I9497a511656a2f70e923b651c66c5ef2917a0939 --- lib/savanna-dashboard | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard index 7713a78637..691b23f6e8 100644 --- a/lib/savanna-dashboard +++ b/lib/savanna-dashboard @@ -37,8 +37,9 @@ SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient function configure_savanna_dashboard() { - echo -e "SAVANNA_URL = \"http://$SERVICE_HOST:8386/v1.1\"\nAUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py - echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)\nINSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py + echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py + echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)" >> $HORIZON_DIR/openstack_dashboard/settings.py + echo -e "INSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py if is_service_enabled neutron; then echo -e "SAVANNA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py From d8864feae93f898f043febf0b4734f0b61c602d4 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 17 Feb 
2014 11:00:42 -0600 Subject: [PATCH 0472/4438] Fix shocco errors Clean up comments to fix errors seen while processing with shocco Change-Id: I0e97ad27613313f03e47c107051ea93b115d4744 --- driver_certs/cinder_driver_cert.sh | 1 + functions | 7 ++++++- lib/apache | 4 ++-- lib/marconi | 3 ++- lib/stackforge | 5 +++-- tools/create_userrc.sh | 4 +--- tools/fixup_stuff.sh | 3 ++- 7 files changed, 17 insertions(+), 10 deletions(-) diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh index 99b2c8e899..e45b7f8736 100755 --- a/driver_certs/cinder_driver_cert.sh +++ b/driver_certs/cinder_driver_cert.sh @@ -16,6 +16,7 @@ # It also assumes default install location (/opt/stack/xxx) # to aid in debug, you should also verify that you've added # an output directory for screen logs: +# # SCREEN_LOGDIR=/opt/stack/screen-logs CERT_DIR=$(cd $(dirname "$0") && pwd) diff --git a/functions b/functions index 5eae7fe510..6979c6c155 100644 --- a/functions +++ b/functions @@ -2,10 +2,15 @@ # # The following variables are assumed to be defined by certain functions: # +# - ``DATABASE_BACKENDS`` # - ``ENABLED_SERVICES`` # - ``FILES`` # - ``GLANCE_HOSTPORT`` +# - ``REQUIREMENTS_DIR`` +# - ``STACK_USER`` # - ``TRACK_DEPENDS`` +# - ``UNDO_REQUIREMENTS`` +# # Include the common functions FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) @@ -45,7 +50,7 @@ function cleanup_tmp { # Updates the dependencies in project_dir from the # openstack/requirements global list before installing anything. 
# -# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR`` +# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS`` # setup_develop directory function setup_develop() { local project_dir=$1 diff --git a/lib/apache b/lib/apache index 8ae78b2181..0e5712f56b 100644 --- a/lib/apache +++ b/lib/apache @@ -4,8 +4,8 @@ # Dependencies: # # - ``functions`` file -# -``STACK_USER`` must be defined - +# - ``STACK_USER`` must be defined +# # lib/apache exports the following functions: # # - is_apache_enabled_service diff --git a/lib/marconi b/lib/marconi index 88312cb1bd..cc33aebd2b 100644 --- a/lib/marconi +++ b/lib/marconi @@ -2,7 +2,8 @@ # Install and start **Marconi** service # To enable a minimal set of Marconi services, add the following to localrc: -# enable_service marconi-server +# +# enable_service marconi-server # # Dependencies: # - functions diff --git a/lib/stackforge b/lib/stackforge index 718b818ff6..5fa4570b74 100644 --- a/lib/stackforge +++ b/lib/stackforge @@ -6,8 +6,9 @@ # This is appropriate for python libraries that release to pypi and are # expected to be used beyond OpenStack like, but are requirements # for core services in global-requirements. -# * wsme -# * pecan +# +# * wsme +# * pecan # # This is not appropriate for stackforge projects which are early stage # OpenStack tools diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index d9c93cc476..c4eb8d4581 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -54,9 +54,7 @@ $0 -P -C mytenant -u myuser -p mypass EOF } -if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,os-cacert:,help,debug -- "$@") -then - #parse error +if ! 
options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,os-cacert:,help,debug -- "$@"); then display_help exit 1 fi diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index a28e10ef2d..47b0cd10cd 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -70,7 +70,8 @@ if [[ -d $dir ]]; then fi # Ubuntu 12.04 -# ----- +# ------------ + # We can regularly get kernel crashes on the 12.04 default kernel, so attempt # to install a new kernel if [[ ${DISTRO} =~ (precise) ]]; then From b72235611d9659a49caf87b2cc89f05fce27a3e0 Mon Sep 17 00:00:00 2001 From: Daniel Salinas Date: Sun, 16 Feb 2014 18:57:20 -0600 Subject: [PATCH 0473/4438] Fixed missing backslash in lib/trove This is breaking the installation of trove with devstack Change-Id: I8b59d96072da47b8be5000eda835258654230b0f Closes-Bug: 1280915 --- lib/trove | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/trove b/lib/trove index 5e1bbd548d..6834149c64 100644 --- a/lib/trove +++ b/lib/trove @@ -86,7 +86,7 @@ create_trove_accounts() { --project $SERVICE_TENANT \ --user $TROVE_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - TROVE_SERVICE=$(openstack service create + TROVE_SERVICE=$(openstack service create \ trove \ --type=database \ --description="Trove Service" \ From 18d5c833d47e41c8c8dcd73f35268d6e2b43df5b Mon Sep 17 00:00:00 2001 From: Ryu Ishimoto Date: Wed, 19 Feb 2014 00:33:46 +0900 Subject: [PATCH 0474/4438] Remove provider router configuration To be compatible with the Icehouse release of MidoNet, the provider router configuration is removed from devstack since it is no longer necessary to configure it. 
Change-Id: I4be2d9bbf2c82fd375702cbb1d60c3277086134f Implements: blueprint remove-provider-router-config-for-midonet --- lib/neutron_plugins/midonet | 11 ++++++----- lib/neutron_thirdparty/midonet | 19 ++----------------- 2 files changed, 8 insertions(+), 22 deletions(-) diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index f95fcb75b9..dd3b2baeca 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -1,6 +1,10 @@ # Neutron MidoNet plugin # ---------------------- +MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet} +MIDONET_API_PORT=${MIDONET_API_PORT:-8080} +MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-api} + # Save trace setting MY_XTRACE=$(set +o | grep xtrace) set +o xtrace @@ -47,8 +51,8 @@ function neutron_plugin_configure_plugin_agent() { } function neutron_plugin_configure_service() { - if [[ "$MIDONET_API_URI" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE MIDONET midonet_uri $MIDONET_API_URI + if [[ "$MIDONET_API_URL" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE MIDONET midonet_uri $MIDONET_API_URL fi if [[ "$MIDONET_USERNAME" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE MIDONET username $MIDONET_USERNAME @@ -59,9 +63,6 @@ function neutron_plugin_configure_service() { if [[ "$MIDONET_PROJECT_ID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE MIDONET project_id $MIDONET_PROJECT_ID fi - if [[ "$MIDONET_PROVIDER_ROUTER_ID" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $MIDONET_PROVIDER_ROUTER_ID - fi Q_L3_ENABLED=True Q_L3_ROUTER_PER_TENANT=True diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet index e672528a2d..98be4254fc 100644 --- a/lib/neutron_thirdparty/midonet +++ b/lib/neutron_thirdparty/midonet @@ -10,20 +10,12 @@ # MidoNet devstack destination dir MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet} -MIDONET_API_PORT=${MIDONET_API_PORT:-8080} -MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-api} # MidoNet client repo 
MIDONET_CLIENT_REPO=${MIDONET_CLIENT_REPO:-https://github.com/midokura/python-midonetclient.git} MIDONET_CLIENT_BRANCH=${MIDONET_CLIENT_BRANCH:-master} MIDONET_CLIENT_DIR=${MIDONET_CLIENT_DIR:-$MIDONET_DIR/python-midonetclient} -# MidoNet OpenStack repo -MIDONET_OS_REPO=${MIDONET_OS_REPO:-https://github.com/midokura/midonet-openstack.git} -MIDONET_OS_BRANCH=${MIDONET_OS_BRANCH:-master} -MIDONET_OS_DIR=${MIDONET_OS_DIR:-$MIDONET_DIR/midonet-openstack} -MIDONET_SETUP_SCRIPT=${MIDONET_SETUP_SCRIPT:-$MIDONET_OS_DIR/bin/setup_midonet_topology.py} - # Save trace setting MY_XTRACE=$(set +o | grep xtrace) set +o xtrace @@ -33,19 +25,12 @@ function configure_midonet() { } function init_midonet() { - - # Initialize DB. Evaluate the output of setup_midonet_topology.py to set - # env variables for provider router ID. - eval `python $MIDONET_SETUP_SCRIPT $MIDONET_API_URL admin $ADMIN_PASSWORD admin provider_devices` - die_if_not_set $LINENO provider_router_id "Error running midonet setup script, provider_router_id was not set." - - iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $provider_router_id + : } function install_midonet() { git_clone $MIDONET_CLIENT_REPO $MIDONET_CLIENT_DIR $MIDONET_CLIENT_BRANCH - git_clone $MIDONET_OS_REPO $MIDONET_OS_DIR $MIDONET_OS_BRANCH - export PYTHONPATH=$MIDONET_CLIENT_DIR/src:$MIDONET_OS_DIR/src:$PYTHONPATH + export PYTHONPATH=$MIDONET_CLIENT_DIR/src:$PYTHONPATH } function start_midonet() { From 2dcc77422348e55b6f7028679647cfbdf872f6a2 Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Tue, 18 Feb 2014 13:45:18 -0500 Subject: [PATCH 0475/4438] Add retry to connect to mongo db This patch adds retries to connect to the mongodb, after a restart. 
Change-Id: I16e37614736c247fa0b737db2b868c052c2aa33a --- lib/marconi | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index 88312cb1bd..b6ce57a295 100644 --- a/lib/marconi +++ b/lib/marconi @@ -68,7 +68,9 @@ function is_marconi_enabled { # cleanup_marconi() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_marconi() { - mongo marconi --eval "db.dropDatabase();" + if ! timeout $SERVICE_TIMEOUT sh -c "while ! mongo marconi --eval 'db.dropDatabase();'; do sleep 1; done"; then + die $LINENO "Mongo DB did not start" + fi } # configure_marconiclient() - Set config files, create data dirs, etc From de2057290a368e339cb66a8a61d483c90f964089 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Wed, 19 Feb 2014 14:00:42 +0400 Subject: [PATCH 0476/4438] Improve savanna keystone auth configuration We're doing to use common keystone configuration approach - section keystone_authtoken with config opts from the python-keystoneclient auth_token middleware. 
Change-Id: Ibbe0c76ee3b00045f5cb5134bd7661e9cef6ccdd --- extras.d/70-savanna.sh | 5 +++++ lib/savanna | 29 +++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh index 6bbe113fa7..edc1376deb 100644 --- a/extras.d/70-savanna.sh +++ b/extras.d/70-savanna.sh @@ -8,6 +8,7 @@ if is_service_enabled savanna; then elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Savanna" install_savanna + cleanup_savanna if is_service_enabled horizon; then install_savanna_dashboard fi @@ -29,4 +30,8 @@ if is_service_enabled savanna; then cleanup_savanna_dashboard fi fi + + if [[ "$1" == "clean" ]]; then + cleanup_savanna + fi fi diff --git a/lib/savanna b/lib/savanna index 43c5e386fe..954f0e711e 100644 --- a/lib/savanna +++ b/lib/savanna @@ -10,6 +10,7 @@ # configure_savanna # start_savanna # stop_savanna +# cleanup_savanna # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -33,6 +34,8 @@ SAVANNA_SERVICE_HOST=${SAVANNA_SERVICE_HOST:-$SERVICE_HOST} SAVANNA_SERVICE_PORT=${SAVANNA_SERVICE_PORT:-8386} SAVANNA_SERVICE_PROTOCOL=${SAVANNA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +SAVANNA_AUTH_CACHE_DIR=${SAVANNA_AUTH_CACHE_DIR:-/var/cache/savanna} + # Support entry points installation of console scripts if [[ -d $SAVANNA_DIR/bin ]]; then SAVANNA_BIN_DIR=$SAVANNA_DIR/bin @@ -83,6 +86,14 @@ function create_savanna_accounts() { fi } +# cleanup_savanna() - Remove residual data files, anything left over from +# previous runs that would need to clean up. +function cleanup_savanna() { + + # Cleanup auth cache dir + sudo rm -rf $SAVANNA_AUTH_CACHE_DIR +} + # configure_savanna() - Set config files, create data dirs, etc function configure_savanna() { @@ -94,9 +105,27 @@ function configure_savanna() { # Copy over savanna configuration file and configure common parameters. 
cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_FILE + # Create auth cache dir + sudo mkdir -p $SAVANNA_AUTH_CACHE_DIR + sudo chown $STACK_USER $SAVANNA_AUTH_CACHE_DIR + rm -rf $SAVANNA_AUTH_CACHE_DIR/* + + # Set obsolete keystone auth configs for backward compatibility + iniset $SAVANNA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST + iniset $SAVANNA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT + iniset $SAVANNA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL iniset $SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD iniset $SAVANNA_CONF_FILE DEFAULT os_admin_username savanna iniset $SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME + + # Set actual keystone auth configs + iniset $SAVANNA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + iniset $SAVANNA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $SAVANNA_CONF_FILE keystone_authtoken admin_user savanna + iniset $SAVANNA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $SAVANNA_CONF_FILE keystone_authtoken signing_dir $SAVANNA_AUTH_CACHE_DIR + iniset $SAVANNA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA + iniset $SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG iniset $SAVANNA_CONF_FILE database connection `database_connection_url savanna` From 27f29440d1b6f5343e02b8beff04c21882139ce7 Mon Sep 17 00:00:00 2001 From: Brett Campbell Date: Wed, 19 Feb 2014 18:23:16 -0800 Subject: [PATCH 0477/4438] Set umask Ensure we have a known-good umask. Otherwise files such as /etc/polkit-1/rules.d/50-libvirt-$STACK_USER.rules may not be readable by non-root users afterwards. Also reworded some comments to be more clear. 
Change-Id: I7653d4eee062cf32df22aa158da6269b1aa9a558 Closes-Bug: #1265195 --- stack.sh | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/stack.sh b/stack.sh index 4a55225685..a5d66cc8e8 100755 --- a/stack.sh +++ b/stack.sh @@ -5,11 +5,12 @@ # **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron**, # and **Swift** -# This script allows you to specify configuration options of what git -# repositories to use, enabled services, network configuration and various -# passwords. If you are crafty you can run the script on multiple nodes using -# shared settings for common resources (mysql, rabbitmq) and build a multi-node -# developer install. +# This script's options can be changed by setting appropriate environment +# variables. You can configure things like which git repositories to use, +# services to enable, OS images to use, etc. Default values are located in the +# ``stackrc`` file. If you are crafty you can run the script on multiple nodes +# using shared settings for common resources (eg., mysql or rabbitmq) and build +# a multi-node developer install. # To keep this script simple we assume you are running on a recent **Ubuntu** # (12.04 Precise or newer) or **Fedora** (F18 or newer) machine. (It may work @@ -30,6 +31,9 @@ unset LANGUAGE LC_ALL=C export LC_ALL +# Make sure umask is sane +umask 022 + # Keep track of the devstack directory TOP_DIR=$(cd $(dirname "$0") && pwd) From f6368d3eaccc33d5afdbc53a34bf6e37b6e11eb8 Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Thu, 20 Feb 2014 13:31:26 +0900 Subject: [PATCH 0478/4438] Fix comments about System Functions This commit fixes comments about "System Functions". 
* Add a missing comment about System Functions in the header * Fix singular to plural like others Change-Id: I3feb94cd11a6683ca80093574d60fdf7420e3af2 --- functions-common | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index d92e39cd91..eb9b4ac8bb 100644 --- a/functions-common +++ b/functions-common @@ -15,6 +15,7 @@ # - Process Functions # - Python Functions # - Service Functions +# - System Functions # # The following variables are assumed to be defined by certain functions: # @@ -1280,8 +1281,8 @@ function use_exclusive_service { } -# System Function -# =============== +# System Functions +# ================ # Only run the command if the target file (the last arg) is not on an # NFS filesystem. From 1958c1eb5e3521a70a3cf4185a177da7d17d83e9 Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Thu, 20 Feb 2014 14:32:15 +0900 Subject: [PATCH 0479/4438] Remove unnecessary comment out lines This commit removes some comment-outed codes. If we want to use them, we can get them from the git repository. Change-Id: Ie438c43d332d0631750f0ad458653fc40e23faad --- clean.sh | 9 --------- tools/info.sh | 2 -- tools/xen/build_domU_multi.sh | 6 ------ 3 files changed, 17 deletions(-) diff --git a/clean.sh b/clean.sh index 09f08dc8c2..b2a9405c88 100755 --- a/clean.sh +++ b/clean.sh @@ -101,11 +101,6 @@ if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; th cleanup_nova_hypervisor fi -#if mount | grep $DATA_DIR/swift/drives; then -# sudo umount $DATA_DIR/swift/drives/sdb1 -#fi - - # Clean out /etc sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift @@ -123,9 +118,5 @@ if [[ -n "$SCREEN_LOGDIR" ]] && [[ -d "$SCREEN_LOGDIR" ]]; then sudo rm -rf $SCREEN_LOGDIR fi -# Clean up networking... -# should this be in nova? 
-# FIXED_IP_ADDR in br100 - # Clean up files rm -f $TOP_DIR/.stackenv diff --git a/tools/info.sh b/tools/info.sh index 3ab7966ab4..1e521b9c4b 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -122,13 +122,11 @@ while read line; do ver=${BASH_REMATCH[2]} else # Unhandled format in freeze file - #echo "unknown: $p" continue fi echo "pip|${p}|${ver}" else # No match in freeze file - #echo "unknown: $p" continue fi done <$FREEZE_FILE diff --git a/tools/xen/build_domU_multi.sh b/tools/xen/build_domU_multi.sh index 0285f42e42..0eb2077414 100755 --- a/tools/xen/build_domU_multi.sh +++ b/tools/xen/build_domU_multi.sh @@ -25,11 +25,5 @@ function build_xva { # because rabbit won't launch with an ip addr hostname :( build_xva HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit" -# Wait till the head node is up -#while ! curl -L http://$HEAD_PUB_IP | grep -q username; do -# echo "Waiting for head node ($HEAD_PUB_IP) to start..." -# sleep 5 -#done - # Build the HA compute host build_xva COMPUTENODE $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api" From 3d60f4dd531388cd01a3aa689053dfc22acbd16c Mon Sep 17 00:00:00 2001 From: Giulio Fidente Date: Thu, 20 Feb 2014 16:43:49 +0100 Subject: [PATCH 0480/4438] Disable tempest backup tests if c-bak unavailable This will update the tempest config to not run the cinder backup tests when the c-bak service is not enabled. Change-Id: I0b6486f1222afa7ae9bd9d13c7d3648d2b870710 --- lib/tempest | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index c8eebfcf05..596750b32f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -314,8 +314,8 @@ function configure_tempest() { iniset $TEMPEST_CONFIG scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} # Volume - if is_service_enabled c-bak; then - iniset $TEMPEST_CONFIG volume volume_backup_enabled "True" + if ! 
is_service_enabled c-bak; then + iniset $TEMPEST_CONFIG volume-feature-enabled backup False fi CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) if [ $CINDER_MULTI_LVM_BACKEND == "True" ]; then From 2d65059e725ad27d1e9bdddbea9982d1d8027c01 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 20 Feb 2014 15:49:13 +0100 Subject: [PATCH 0481/4438] Add RHEL7 beta support RHEL7 still in beta status, so it will require the FORCE option, until the GA release. The main notable difference from another RHEL family members, it does not have the mysql alias for the mariadb. Change-Id: Ic90bb6c3dd9447fc80453c3dc1adb22cdfc6226f --- files/rpms/cinder | 2 +- files/rpms/glance | 4 ++-- files/rpms/neutron | 4 ++-- files/rpms/nova | 8 ++++---- files/rpms/swift | 2 +- lib/databases/mysql | 18 +++++++++++++++--- 6 files changed, 25 insertions(+), 13 deletions(-) diff --git a/files/rpms/cinder b/files/rpms/cinder index 623c13e676..199ae10b79 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -4,4 +4,4 @@ qemu-img python-devel postgresql-devel iscsi-initiator-utils -python-lxml #dist:f18,f19,f20 +python-lxml #dist:f18,f19,f20,rhel7 diff --git a/files/rpms/glance b/files/rpms/glance index fffd9c85b4..785ce25df5 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -9,8 +9,8 @@ python-argparse python-devel python-eventlet python-greenlet -python-lxml #dist:f18,f19,f20 -python-paste-deploy #dist:f18,f19,f20 +python-lxml #dist:f18,f19,f20,rhel7 +python-paste-deploy #dist:f18,f19,f20,rhel7 python-routes python-sqlalchemy python-wsgiref diff --git a/files/rpms/neutron b/files/rpms/neutron index 67bf52350a..42d7f68d37 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -11,8 +11,8 @@ python-greenlet python-iso8601 python-kombu #rhel6 gets via pip -python-paste # dist:f18,f19,f20 -python-paste-deploy # dist:f18,f19,f20 +python-paste # dist:f18,f19,f20,rhel7 +python-paste-deploy # dist:f18,f19,f20,rhel7 python-qpid python-routes python-sqlalchemy diff 
--git a/files/rpms/nova b/files/rpms/nova index ac70ac5d6f..a607d925e1 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -28,11 +28,11 @@ python-kombu python-lockfile python-migrate python-mox -python-paramiko # dist:f18,f19,f20 -# ^ on RHEL, brings in python-crypto which conflicts with version from +python-paramiko # dist:f18,f19,f20,rhel7 +# ^ on RHEL6, brings in python-crypto which conflicts with version from # pip we need -python-paste # dist:f18,f19,f20 -python-paste-deploy # dist:f18,f19,f20 +python-paste # dist:f18,f19,f20,rhel7 +python-paste-deploy # dist:f18,f19,f20,rhel7 python-qpid python-routes python-sqlalchemy diff --git a/files/rpms/swift b/files/rpms/swift index 32432bca9b..72253f7752 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -9,7 +9,7 @@ python-eventlet python-greenlet python-netifaces python-nose -python-paste-deploy # dist:f18,f19,f20 +python-paste-deploy # dist:f18,f19,f20,rhel7 python-simplejson python-webob pyxattr diff --git a/lib/databases/mysql b/lib/databases/mysql index 476b4b91b7..31e7163033 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -25,7 +25,11 @@ function cleanup_database_mysql { sudo rm -rf /var/lib/mysql return elif is_fedora; then - MYSQL=mysqld + if [[ $DISTRO =~ (rhel7) ]]; then + MYSQL=mariadb + else + MYSQL=mysqld + fi elif is_suse; then MYSQL=mysql else @@ -48,8 +52,12 @@ function configure_database_mysql { MY_CONF=/etc/mysql/my.cnf MYSQL=mysql elif is_fedora; then + if [[ $DISTRO =~ (rhel7) ]]; then + MYSQL=mariadb + else + MYSQL=mysqld + fi MY_CONF=/etc/my.cnf - MYSQL=mysqld elif is_suse; then MY_CONF=/etc/my.cnf MYSQL=mysql @@ -135,7 +143,11 @@ EOF fi # Install mysql-server if is_ubuntu || is_fedora; then - install_package mysql-server + if [[ $DISTRO =~ (rhel7) ]]; then + install_package mariadb-server + else + install_package mysql-server + fi elif is_suse; then if ! 
is_package_installed mariadb; then install_package mysql-community-server From 09bb9e67923c1de4d4479000eb329b139732c57b Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 21 Feb 2014 14:33:29 +1100 Subject: [PATCH 0482/4438] Add more files to run_tests.sh bash8 check Add functions-common, stackrc, openrc, exerciserc, eucarc to bash8 checks Change-Id: Ic14b348c871bf98bf35c7e866e715bb75bdccf97 --- run_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run_tests.sh b/run_tests.sh index 9d9d18661e..b4f26c5709 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -20,7 +20,7 @@ if [[ -n $@ ]]; then else LIBS=`find lib -type f | grep -v \.md` SCRIPTS=`find . -type f -name \*\.sh` - EXTRA="functions" + EXTRA="functions functions-common stackrc openrc exerciserc eucarc" FILES="$SCRIPTS $LIBS $EXTRA" fi From f8e86bb3129c6aa5cb9c70ceb2a55f01b2dd1bf0 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 21 Feb 2014 15:16:31 +1100 Subject: [PATCH 0483/4438] Un-nest generate_swift_config I think this got accidentally nested during some code refactorizing? 
Change-Id: Ie486cf3395b6acf3a10eb32e116d39ca56134b9f --- lib/swift | 79 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 40 insertions(+), 39 deletions(-) diff --git a/lib/swift b/lib/swift index df586abe8b..6c33af5082 100644 --- a/lib/swift +++ b/lib/swift @@ -231,6 +231,46 @@ function _config_swift_apache_wsgi() { done } +# This function generates an object/container/account configuration +# emulating 4 nodes on different ports +function generate_swift_config() { + local swift_node_config=$1 + local node_id=$2 + local bind_port=$3 + local server_type=$4 + + log_facility=$[ node_id - 1 ] + node_path=${SWIFT_DATA_DIR}/${node_number} + + iniuncomment ${swift_node_config} DEFAULT user + iniset ${swift_node_config} DEFAULT user ${STACK_USER} + + iniuncomment ${swift_node_config} DEFAULT bind_port + iniset ${swift_node_config} DEFAULT bind_port ${bind_port} + + iniuncomment ${swift_node_config} DEFAULT swift_dir + iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONF_DIR} + + iniuncomment ${swift_node_config} DEFAULT devices + iniset ${swift_node_config} DEFAULT devices ${node_path} + + iniuncomment ${swift_node_config} DEFAULT log_facility + iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility} + + iniuncomment ${swift_node_config} DEFAULT workers + iniset ${swift_node_config} DEFAULT workers 1 + + iniuncomment ${swift_node_config} DEFAULT disable_fallocate + iniset ${swift_node_config} DEFAULT disable_fallocate true + + iniuncomment ${swift_node_config} DEFAULT mount_check + iniset ${swift_node_config} DEFAULT mount_check false + + iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode + iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes +} + + # configure_swift() - Set config files, create data dirs and loop image function configure_swift() { local swift_pipeline="${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH}" @@ -364,45 +404,6 @@ EOF cp ${SWIFT_DIR}/etc/swift.conf-sample 
${SWIFT_CONF_DIR}/swift.conf iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH} - # This function generates an object/container/account configuration - # emulating 4 nodes on different ports - function generate_swift_config() { - local swift_node_config=$1 - local node_id=$2 - local bind_port=$3 - local server_type=$4 - - log_facility=$[ node_id - 1 ] - node_path=${SWIFT_DATA_DIR}/${node_number} - - iniuncomment ${swift_node_config} DEFAULT user - iniset ${swift_node_config} DEFAULT user ${STACK_USER} - - iniuncomment ${swift_node_config} DEFAULT bind_port - iniset ${swift_node_config} DEFAULT bind_port ${bind_port} - - iniuncomment ${swift_node_config} DEFAULT swift_dir - iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONF_DIR} - - iniuncomment ${swift_node_config} DEFAULT devices - iniset ${swift_node_config} DEFAULT devices ${node_path} - - iniuncomment ${swift_node_config} DEFAULT log_facility - iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility} - - iniuncomment ${swift_node_config} DEFAULT workers - iniset ${swift_node_config} DEFAULT workers 1 - - iniuncomment ${swift_node_config} DEFAULT disable_fallocate - iniset ${swift_node_config} DEFAULT disable_fallocate true - - iniuncomment ${swift_node_config} DEFAULT mount_check - iniset ${swift_node_config} DEFAULT mount_check false - - iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode - iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes - } - for node_number in ${SWIFT_REPLICAS_SEQ}; do swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config} From 8e1a1ffdfbf59e01688fd2e6e007ab72d49263ed Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Fri, 21 Feb 2014 14:45:48 +0000 Subject: [PATCH 0484/4438] Set stack_user_domain config correctly The recently merged patch which creates a domain for heat fails to correctly set the domain ID in 
heat.conf, so move the setting of the config option to immediately after we create the domain. Also add the missing DEFAULT section identifier in the iniset, and use OS_TOKEN instead of OS_SERVICE token, because the stack.sh comment says this is exported for the openstackclient workaround. Change-Id: I912f774f1215d68cbcfe44229b371f318d92966a Closes-Bug: #1283075 --- lib/heat | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/lib/heat b/lib/heat index efb01ef3b8..af10fa6f1d 100644 --- a/lib/heat +++ b/lib/heat @@ -110,15 +110,6 @@ function configure_heat() { iniset $HEAT_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 iniset $HEAT_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens - # stack user domain - # Note we have to pass token/endpoint here because the current endpoint and - # version negotiation in OSC means just --os-identity-api-version=3 won't work - KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" - D_ID=$(openstack --os-token $OS_SERVICE_TOKEN --os-url=$KS_ENDPOINT_V3 \ - --os-identity-api-version=3 domain show heat \ - | grep ' id ' | get_field 2) - iniset $HEAT_CONF stack_user_domain ${D_ID} - # paste_deploy [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_CONF paste_deploy flavor standalone @@ -211,9 +202,11 @@ function create_heat_accounts() { # Note we have to pass token/endpoint here because the current endpoint and # version negotiation in OSC means just --os-identity-api-version=3 won't work KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" - openstack --os-token $OS_SERVICE_TOKEN --os-url=$KS_ENDPOINT_V3 \ + D_ID=$(openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \ --os-identity-api-version=3 domain create heat \ - --description "Owns users and projects created by heat" + --description 
"Owns users and projects created by heat" \ + | grep ' id ' | get_field 2) + iniset $HEAT_CONF DEFAULT stack_user_domain ${D_ID} } # Restore xtrace From f2ca87a8d8ded80384b2cafb46ef2ca4cf19a986 Mon Sep 17 00:00:00 2001 From: Rabi Mishra Date: Fri, 21 Feb 2014 20:08:28 +0530 Subject: [PATCH 0485/4438] Implements fix to run lbaas service on fedora with devstack changes 'user_group = nobody' in 'haproxy' section of lbaas_agent.ini Change-Id: I801fec5a11d8abd97cb6f5cdff35fabb9eaf9000 Closes-Bug: 1283064 --- lib/neutron_plugins/services/loadbalancer | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer index 5d7a94e5d8..3714142a83 100644 --- a/lib/neutron_plugins/services/loadbalancer +++ b/lib/neutron_plugins/services/loadbalancer @@ -38,6 +38,7 @@ function neutron_agent_lbaas_configure_agent() { if is_fedora; then iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody" + iniset $LBAAS_AGENT_CONF_FILENAME haproxy user_group "nobody" fi } From 67df3b2fc2b2e7b1cfb0418e59f96db7561277be Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Thu, 20 Feb 2014 14:48:59 -0500 Subject: [PATCH 0486/4438] Bind Marconi to SERVICE_HOST & add health check This patch, 1. Binds Marconi to SERVICE_HOST, to be consistent with other services. 2. Adds a health check to verify if marconi started correctly. 
Change-Id: I1d48d0e610369cc97d479a5cd47b2bd11656da3f --- lib/marconi | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/lib/marconi b/lib/marconi index b6ce57a295..ee7bf0ec7b 100644 --- a/lib/marconi +++ b/lib/marconi @@ -51,6 +51,11 @@ MARCONI_BRANCH=${MARCONI_BRANCH:-master} MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconiclient.git} MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master} +# Set Marconi Connection Info +MARCONI_SERVICE_HOST=${MARCONI_SERVICE_HOST:-$SERVICE_HOST} +MARCONI_SERVICE_PORT=${MARCONI_SERVICE_PORT:-8888} +MARCONI_SERVICE_PROTOCOL=${MARCONI_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} + # Tell Tempest this project is present TEMPEST_SERVICES+=,marconi @@ -89,7 +94,7 @@ function configure_marconi() { sudo chown $USER $MARCONI_API_LOG_DIR iniset $MARCONI_CONF DEFAULT verbose True - iniset $MARCONI_CONF 'drivers:transport:wsgi' bind '0.0.0.0' + iniset $MARCONI_CONF 'drivers:transport:wsgi' bind $MARCONI_SERVICE_HOST iniset $MARCONI_CONF keystone_authtoken auth_protocol http iniset $MARCONI_CONF keystone_authtoken admin_user marconi @@ -142,6 +147,10 @@ function install_marconiclient() { # start_marconi() - Start running processes, including screen function start_marconi() { screen_it marconi-server "marconi-server --config-file $MARCONI_CONF" + echo "Waiting for Marconi to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then + die $LINENO "Marconi did not start" + fi } # stop_marconi() - Stop running processes @@ -176,9 +185,9 @@ function create_marconi_accounts() { openstack endpoint create \ $MARCONI_SERVICE \ --region RegionOne \ - --publicurl "http://$SERVICE_HOST:8888" \ - --adminurl "http://$SERVICE_HOST:8888" \ - --internalurl "http://$SERVICE_HOST:8888" + --publicurl "$MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT" \ + --adminurl "$MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT" \ + --internalurl "$MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT" fi } From f5aa05c0ab1e1ae0c9f56d5eaf9164adcd4cd7b9 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Fri, 21 Feb 2014 22:03:59 -0500 Subject: [PATCH 0487/4438] Add support for oslo.vmware Change-Id: I2162a339b1869c27850afcda6be3c4e11de94e0e --- lib/oslo | 4 ++++ stackrc | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/lib/oslo b/lib/oslo index b089842ae4..516ce1c3a9 100644 --- a/lib/oslo +++ b/lib/oslo @@ -24,6 +24,7 @@ CLIFF_DIR=$DEST/cliff OSLOCFG_DIR=$DEST/oslo.config OSLOMSG_DIR=$DEST/oslo.messaging OSLORWRAP_DIR=$DEST/oslo.rootwrap +OSLOVMWARE_DIR=$DEST/oslo.vmware PYCADF_DIR=$DEST/pycadf STEVEDORE_DIR=$DEST/stevedore TASKFLOW_DIR=$DEST/taskflow @@ -49,6 +50,9 @@ function install_oslo() { git_clone $OSLORWRAP_REPO $OSLORWRAP_DIR $OSLORWRAP_BRANCH setup_develop $OSLORWRAP_DIR + git_clone $OSLOVMWARE_REPO $OSLOVMWARE_DIR $OSLOVMWARE_BRANCH + setup_develop $OSLOVMWARE_DIR + git_clone $PYCADF_REPO $PYCADF_DIR $PYCADF_BRANCH setup_develop $PYCADF_DIR diff --git a/stackrc b/stackrc index 0b081c4014..91f5751966 100644 --- a/stackrc +++ b/stackrc @@ -167,6 +167,10 @@ OSLOMSG_BRANCH=${OSLOMSG_BRANCH:-master} OSLORWRAP_REPO=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git} OSLORWRAP_BRANCH=${OSLORWRAP_BRANCH:-master} +# 
oslo.vmware +OSLOVMWARE_REPO=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git} +OSLOVMWARE_BRANCH=${OSLOVMWARE_BRANCH:-master} + # pycadf auditing library PYCADF_REPO=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git} PYCADF_BRANCH=${PYCADF_BRANCH:-master} From d53ad0b07d3e7bdd2668c2d3f1815d95d4b8f532 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 20 Feb 2014 13:55:13 +1100 Subject: [PATCH 0488/4438] Add GIT_TIMEOUT variable to watch git operations During my CI testing of each devstack change I can often see git get itself stuck and hang indefinitely. I'm not sure if it's transient network issues, or issues at the remote end (seen with both github.com and git.openstack.org) but it hits fairly frequently. Retrying the command usually gets it going again. Searching for "git hanging" and similar shows its not entirely uncommon... This adds a watchdog timeout for remote git operations based on a new environment variable GIT_TIMEOUT. It will retry 3 times before giving up. The wrapper is applied to the main remote git calls. Change-Id: I5b0114ca26b7ac2f25993264f761cba9ec8c09e1 --- functions-common | 41 ++++++++++++++++++++++++++++++++++++----- stackrc | 11 +++++++++++ 2 files changed, 47 insertions(+), 5 deletions(-) diff --git a/functions-common b/functions-common index d92e39cd91..9cd5acd47b 100644 --- a/functions-common +++ b/functions-common @@ -498,16 +498,16 @@ function git_clone { if [[ ! -d $GIT_DEST ]]; then [[ "$ERROR_ON_CLONE" = "True" ]] && \ die $LINENO "Cloning not allowed in this configuration" - git clone $GIT_REMOTE $GIT_DEST + git_timed clone $GIT_REMOTE $GIT_DEST fi cd $GIT_DEST - git fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD + git_timed fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD else # do a full clone only if the directory doesn't exist if [[ ! 
-d $GIT_DEST ]]; then [[ "$ERROR_ON_CLONE" = "True" ]] && \ die $LINENO "Cloning not allowed in this configuration" - git clone $GIT_REMOTE $GIT_DEST + git_timed clone $GIT_REMOTE $GIT_DEST cd $GIT_DEST # This checkout syntax works for both branches and tags git checkout $GIT_REF @@ -516,7 +516,7 @@ function git_clone { cd $GIT_DEST # set the url to pull from and fetch git remote set-url origin $GIT_REMOTE - git fetch origin + git_timed fetch origin # remove the existing ignored files (like pyc) as they cause breakage # (due to the py files having older timestamps than our pyc, so python # thinks the pyc files are correct using them) @@ -541,6 +541,37 @@ function git_clone { git show --oneline | head -1 } +# git can sometimes get itself infinitely stuck with transient network +# errors or other issues with the remote end. This wraps git in a +# timeout/retry loop and is intended to watch over non-local git +# processes that might hang. GIT_TIMEOUT, if set, is passed directly +# to timeout(1); otherwise the default value of 0 maintains the status +# quo of waiting forever. +# usage: git_timed +function git_timed() { + local count=0 + local timeout=0 + + if [[ -n "${GIT_TIMEOUT}" ]]; then + timeout=${GIT_TIMEOUT} + fi + + until timeout -s SIGINT ${timeout} git "$@"; do + # 124 is timeout(1)'s special return code when it reached the + # timeout; otherwise assume fatal failure + if [[ $? -ne 124 ]]; then + die $LINENO "git call failed: [git $@]" + fi + + count=$(($count + 1)) + warn "timeout ${count} for git call: [git $@]" + if [ $count -eq 3 ]; then + die $LINENO "Maximum of 3 git retries reached" + fi + sleep 5 + done +} + # git update using reference as a branch. 
# git_update_branch ref function git_update_branch() { @@ -571,7 +602,7 @@ function git_update_tag() { git tag -d $GIT_TAG # fetching given tag only - git fetch origin tag $GIT_TAG + git_timed fetch origin tag $GIT_TAG git checkout -f $GIT_TAG } diff --git a/stackrc b/stackrc index 56fa40269c..8cec09eb28 100644 --- a/stackrc +++ b/stackrc @@ -69,6 +69,17 @@ fi # (currently only implemented for MySQL backend) DATABASE_QUERY_LOGGING=$(trueorfalse True $DATABASE_QUERY_LOGGING) +# Set a timeout for git operations. If git is still running when the +# timeout expires, the command will be retried up to 3 times. This is +# in the format for timeout(1); +# +# DURATION is a floating point number with an optional suffix: 's' +# for seconds (the default), 'm' for minutes, 'h' for hours or 'd' +# for days. +# +# Zero disables timeouts +GIT_TIMEOUT=${GIT_TIMEOUT:-0} + # Repositories # ------------ From b93ee25b64de5d587c2e0889a9ce689c92aaa0f9 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Sun, 23 Feb 2014 20:41:07 -0500 Subject: [PATCH 0489/4438] make bash8 take a -v flag this ensures that we actually know we are processing all the files we believe we are. Change-Id: I8e99b5f9dc987c946586475f374f7040ca63a478 --- run_tests.sh | 2 +- tools/bash8.py | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index b4f26c5709..a0bfbee0c0 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -26,4 +26,4 @@ fi echo "Running bash8..." 
-./tools/bash8.py $FILES +./tools/bash8.py -v $FILES diff --git a/tools/bash8.py b/tools/bash8.py index 7552e0d642..ca0abd964a 100755 --- a/tools/bash8.py +++ b/tools/bash8.py @@ -110,11 +110,13 @@ def end_of_multiline(line, token): return False -def check_files(files): +def check_files(files, verbose): in_multiline = False logical_line = "" token = False for line in fileinput.input(files): + if verbose and fileinput.isfirstline(): + print "Running bash8 on %s" % fileinput.filename() # NOTE(sdague): multiline processing of heredocs is interesting if not in_multiline: logical_line = line @@ -141,13 +143,14 @@ def get_options(): parser.add_argument('files', metavar='file', nargs='+', help='files to scan for errors') parser.add_argument('-i', '--ignore', help='Rules to ignore') + parser.add_argument('-v', '--verbose', action='store_true', default=False) return parser.parse_args() def main(): opts = get_options() register_ignores(opts.ignore) - check_files(opts.files) + check_files(opts.files, opts.verbose) if ERRORS > 0: print("%d bash8 error(s) found" % ERRORS) From 010959de403660e13eca54c6ef306ef5df24b436 Mon Sep 17 00:00:00 2001 From: Jim Rollenhagen Date: Tue, 18 Feb 2014 13:17:58 -0600 Subject: [PATCH 0490/4438] Perform safety checks in create-stack-user.sh This adds some safety checks to the stack user creation script. This includes: - Using set -o errexit to exit early on errors - Make sure STACK_USER is set before doing anything with it Change-Id: If027daddd03e32c5ba3c2ebb05ad5b27d2868b0a --- tools/create-stack-user.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh index 50f6592a3a..9c29ecd901 100755 --- a/tools/create-stack-user.sh +++ b/tools/create-stack-user.sh @@ -15,6 +15,7 @@ # and it was time for this nonsense to stop. Run this script as root to create # the user and configure sudo. +set -o errexit # Keep track of the devstack directory TOP_DIR=$(cd $(dirname "$0")/.. 
&& pwd) @@ -27,12 +28,14 @@ source $TOP_DIR/functions # and ``DISTRO`` GetDistro -# Needed to get ``ENABLED_SERVICES`` +# Needed to get ``ENABLED_SERVICES`` and ``STACK_USER`` source $TOP_DIR/stackrc # Give the non-root user the ability to run as **root** via ``sudo`` is_package_installed sudo || install_package sudo +[[ -z "$STACK_USER" ]] && die "STACK_USER is not set. Exiting." + if ! getent group $STACK_USER >/dev/null; then echo "Creating a group called $STACK_USER" groupadd $STACK_USER From e9648276a6396a630d0eca812e36fc82ec4b2a0c Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Sun, 23 Feb 2014 18:55:51 +0100 Subject: [PATCH 0491/4438] Removes the dependence with aptitude Removes the dependence with aptitude by replacing the call of: aptitude purge -y ~npackage by apt_get purge -y package* Change-Id: I08875ffad9dc6293047827666f02453a355b16ea Closes-Bug: 1281410 --- lib/databases/mysql | 2 +- lib/databases/postgresql | 2 +- lib/rpc_backend | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 476b4b91b7..3c002f7c43 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -21,7 +21,7 @@ function cleanup_database_mysql { if is_ubuntu; then # Get ruthless with mysql stop_service $MYSQL - sudo aptitude purge -y ~nmysql-server + apt_get purge -y mysql* sudo rm -rf /var/lib/mysql return elif is_fedora; then diff --git a/lib/databases/postgresql b/lib/databases/postgresql index c459feb9e0..96a5947a60 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -21,7 +21,7 @@ function cleanup_database_postgresql { stop_service postgresql if is_ubuntu; then # Get ruthless with mysql - sudo aptitude purge -y ~npostgresql + apt_get purge -y postgresql* return elif is_fedora; then uninstall_package postgresql-server diff --git a/lib/rpc_backend b/lib/rpc_backend index 3651bc0d20..34f576f5b8 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -67,7 +67,7 @@ function 
cleanup_rpc_backend { sudo killall epmd || sudo killall -9 epmd if is_ubuntu; then # And the Erlang runtime too - sudo aptitude purge -y ~nerlang + apt_get purge -y erlang* fi elif is_service_enabled qpid; then if is_fedora; then From 2e2b28b531e392ac59fdfa948bc79a0c74b2f332 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 19 Feb 2014 09:02:02 -0500 Subject: [PATCH 0492/4438] reset prereqs status on clean.sh when running a clean, we should really reset the prereq status as well, as this should start us back from zeroish. Change-Id: I5fae151ab13bcf7fb82feb1e91eed19e0215dc59 --- clean.sh | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/clean.sh b/clean.sh index 09f08dc8c2..465b304a17 100755 --- a/clean.sh +++ b/clean.sh @@ -128,4 +128,10 @@ fi # FIXED_IP_ADDR in br100 # Clean up files -rm -f $TOP_DIR/.stackenv + +FILES_TO_CLEAN=".localrc.auto docs-files docs/ shocco/ stack-screenrc test*.conf* test.ini*" +FILES_TO_CLEAN+=".stackenv .prereqs" + +for file in FILES_TO_CLEAN; do + rm -f $TOP_DIR/$file +done From f1eb0475d9320875f1a6c4a9c398e9388350d206 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 11 Feb 2014 17:28:56 -0500 Subject: [PATCH 0493/4438] don't check for service enabled service enabled is something that's not required for sysstat and friends, because instead we actually can do this with screen_it. Change-Id: I4aa5787101cb0def46690f38a7f82effbb85f502 --- stack.sh | 52 +++++++++++++++++++++++----------------------------- 1 file changed, 23 insertions(+), 29 deletions(-) diff --git a/stack.sh b/stack.sh index 4a55225685..ce19b8fc5c 100755 --- a/stack.sh +++ b/stack.sh @@ -863,42 +863,36 @@ fi init_service_check -# Sysstat +# Sysstat and friends # ------- # If enabled, systat has to start early to track OpenStack service startup. 
-if is_service_enabled sysstat; then - # what we want to measure - # -u : cpu statitics - # -q : load - # -b : io load rates - # -w : process creation and context switch rates - SYSSTAT_OPTS="-u -q -b -w" - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it sysstat "cd $TOP_DIR; ./tools/sar_filter.py $SYSSTAT_OPTS -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" - else - screen_it sysstat "./tools/sar_filter.py $SYSSTAT_OPTS $SYSSTAT_INTERVAL" - fi +# what we want to measure +# -u : cpu statitics +# -q : load +# -b : io load rates +# -w : process creation and context switch rates +SYSSTAT_OPTS="-u -q -b -w" +if [[ -n ${SCREEN_LOGDIR} ]]; then + screen_it sysstat "cd $TOP_DIR; ./tools/sar_filter.py $SYSSTAT_OPTS -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" +else + screen_it sysstat "./tools/sar_filter.py $SYSSTAT_OPTS $SYSSTAT_INTERVAL" fi -if is_service_enabled dstat; then - # Per-process stats - DSTAT_OPTS="-tcndylp --top-cpu-adv" - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE" - else - screen_it dstat "dstat $DSTAT_OPTS" - fi +# A better kind of sysstat, with the top process per time slice +DSTAT_OPTS="-tcndylp --top-cpu-adv" +if [[ -n ${SCREEN_LOGDIR} ]]; then + screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE" +else + screen_it dstat "dstat $DSTAT_OPTS" fi -if is_service_enabled pidstat; then - # Per-process stats - PIDSTAT_OPTS="-l -p ALL -T ALL" - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it pidstat "cd $TOP_DIR; pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL > $SCREEN_LOGDIR/$PIDSTAT_FILE" - else - screen_it pidstat "pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL" - fi +# Per-process stats +PIDSTAT_OPTS="-l -p ALL -T ALL" +if [[ -n ${SCREEN_LOGDIR} ]]; then + screen_it pidstat "cd $TOP_DIR; pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL > $SCREEN_LOGDIR/$PIDSTAT_FILE" +else + screen_it pidstat "pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL" fi From 
af616d93411a9a446ce0d2e72ea4fb7d281cd940 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 17 Feb 2014 12:57:55 -0600 Subject: [PATCH 0494/4438] Move setup_develop() to common It's in the wrong place for current Grenade Change-Id: Ia670198332af5945a56d708cd83d9239df0c2287 --- functions | 54 ------------------------------------------------ functions-common | 52 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 54 deletions(-) diff --git a/functions b/functions index 6979c6c155..3101111c63 100644 --- a/functions +++ b/functions @@ -44,60 +44,6 @@ function cleanup_tmp { } -# ``pip install -e`` the package, which processes the dependencies -# using pip before running `setup.py develop` -# -# Updates the dependencies in project_dir from the -# openstack/requirements global list before installing anything. -# -# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS`` -# setup_develop directory -function setup_develop() { - local project_dir=$1 - - echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" - - # Don't update repo if local changes exist - # Don't use buggy "git diff --quiet" - (cd $project_dir && git diff --exit-code >/dev/null) - local update_requirements=$? - - if [ $update_requirements -eq 0 ]; then - (cd $REQUIREMENTS_DIR; \ - $SUDO_CMD python update.py $project_dir) - fi - - setup_develop_no_requirements_update $project_dir - - # We've just gone and possibly modified the user's source tree in an - # automated way, which is considered bad form if it's a development - # tree because we've screwed up their next git checkin. So undo it. - # - # However... there are some circumstances, like running in the gate - # where we really really want the overridden version to stick. 
So provide - # a variable that tells us whether or not we should UNDO the requirements - # changes (this will be set to False in the OpenStack ci gate) - if [ $UNDO_REQUIREMENTS = "True" ]; then - if [ $update_requirements -eq 0 ]; then - (cd $project_dir && git reset --hard) - fi - fi -} - - -# ``pip install -e`` the package, which processes the dependencies -# using pip before running `setup.py develop` -# Uses globals ``STACK_USER`` -# setup_develop_no_requirements_update directory -function setup_develop_no_requirements_update() { - local project_dir=$1 - - pip_install -e $project_dir - # ensure that further actions can do things like setup.py sdist - safe_chown -R $STACK_USER $1/*.egg-info -} - - # Retrieve an image from a URL and upload into Glance. # Uses the following variables: # diff --git a/functions-common b/functions-common index d92e39cd91..d6f71b4825 100644 --- a/functions-common +++ b/functions-common @@ -1130,6 +1130,58 @@ function pip_install { && $SUDO_PIP rm -rf ${pip_build_tmp} } +# ``pip install -e`` the package, which processes the dependencies +# using pip before running `setup.py develop` +# +# Updates the dependencies in project_dir from the +# openstack/requirements global list before installing anything. +# +# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS`` +# setup_develop directory +function setup_develop() { + local project_dir=$1 + + echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" + + # Don't update repo if local changes exist + # Don't use buggy "git diff --quiet" + (cd $project_dir && git diff --exit-code >/dev/null) + local update_requirements=$? 
+ + if [ $update_requirements -eq 0 ]; then + (cd $REQUIREMENTS_DIR; \ + $SUDO_CMD python update.py $project_dir) + fi + + setup_develop_no_requirements_update $project_dir + + # We've just gone and possibly modified the user's source tree in an + # automated way, which is considered bad form if it's a development + # tree because we've screwed up their next git checkin. So undo it. + # + # However... there are some circumstances, like running in the gate + # where we really really want the overridden version to stick. So provide + # a variable that tells us whether or not we should UNDO the requirements + # changes (this will be set to False in the OpenStack ci gate) + if [ $UNDO_REQUIREMENTS = "True" ]; then + if [ $update_requirements -eq 0 ]; then + (cd $project_dir && git reset --hard) + fi + fi +} + +# ``pip install -e`` the package, which processes the dependencies +# using pip before running `setup.py develop` +# Uses globals ``STACK_USER`` +# setup_develop_no_requirements_update directory +function setup_develop_no_requirements_update() { + local project_dir=$1 + + pip_install -e $project_dir + # ensure that further actions can do things like setup.py sdist + safe_chown -R $STACK_USER $1/*.egg-info +} + # Service Functions # ================= From 71ef61ac8727137da01b3ca970a70b3adc81fd51 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Wed, 19 Feb 2014 22:19:24 -0800 Subject: [PATCH 0495/4438] Add variable to configure the run of IPv6 Tests Related Tempest change: https://review.openstack.org/#/c/74933/ Closes-bug: 1282387 Change-Id: If9e9c5319c484dc4c00ed3bdcefc132410719b87 --- lib/tempest | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/tempest b/lib/tempest index 596750b32f..d2227feed9 100644 --- a/lib/tempest +++ b/lib/tempest @@ -63,6 +63,9 @@ TEMPEST_VOLUME_DRIVER=${TEMPEST_VOLUME_DRIVER:-default} TEMPEST_VOLUME_VENDOR=${TEMPEST_VOLUME_VENDOR:-"Open Source"} TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-iSCSI} +# 
Neutron/Network variables +IPV6_ENABLED=$(trueorfalse True $IPV6_ENABLED) + # Functions # --------- @@ -285,11 +288,13 @@ function configure_tempest() { # Compute admin iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED + # Network iniset $TEMPEST_CONFIG network api_version 2.0 iniset $TEMPEST_CONFIG network tenant_networks_reachable "$tenant_networks_reachable" iniset $TEMPEST_CONFIG network public_network_id "$public_network_id" iniset $TEMPEST_CONFIG network public_router_id "$public_router_id" iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE" + iniset $TEMPEST_CONFIG network ipv6_enabled "$IPV6_ENABLED" # boto iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" From 041fa712472d887550a540dd50ade546f847c6b4 Mon Sep 17 00:00:00 2001 From: David Kranz Date: Mon, 24 Feb 2014 13:30:59 -0500 Subject: [PATCH 0496/4438] Make admin_bind_host configurable The use case is running devstack inside an OpenStack vm and running tempest from some other machine. To make the catalog export urls that can be accessed from off the devstack machine, you need to set KEYSTONE_SERVICE_HOST to an external IP. But devstack uses that address in its setup of keystone in addition to exporting in the catalog. Because OpenStack has an issue where a vm cannot access itself through its own floating ip, devstack fails. There is no way to have this use case by providing an ip address. The workaround is to use the hostname of the devstack machine. That worked until recently when a change was made to set admin_bind_host to the value of KEYSTONE_SERVICE_HOST. The result is that port 35357 is only opened locally. This change allows the devstack user to restore the original behavior allowing this use case. 
Change-Id: I97b938b305b7dd878397e7e64462650064e59cd2 Closes-Bug: #1283803 --- lib/keystone | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index cebb4d3522..44ac94d802 100644 --- a/lib/keystone +++ b/lib/keystone @@ -70,6 +70,8 @@ KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001} KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +# Bind hosts +KEYSTONE_ADMIN_BIND_HOST=${KEYSTONE_ADMIN_BIND_HOST:-$KEYSTONE_SERVICE_HOST} # Set the tenant for service accounts in Keystone SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} @@ -178,7 +180,7 @@ function configure_keystone() { # Set the URL advertised in the ``versions`` structure returned by the '/' route iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(public_port)s/" iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(admin_port)s/" - iniset $KEYSTONE_CONF DEFAULT admin_bind_host "$KEYSTONE_SERVICE_HOST" + iniset $KEYSTONE_CONF DEFAULT admin_bind_host "$KEYSTONE_ADMIN_BIND_HOST" # Register SSL certificates if provided if is_ssl_enabled_service key; then From 80313b24404105fb68d1488d48e00574129ccd69 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Sun, 23 Feb 2014 09:55:01 -0500 Subject: [PATCH 0497/4438] match devstack-gate format support millisecond resolution and the | separator for ts vs. content. everything else in openstack is running at millisecond resolution, and some times it's actually useful to see that when debugging gate failures. 
Change-Id: I2227ab0b4965cd1a24b579bdf2ba8c1f9a432f70 --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index c153132485..eaccc76537 100755 --- a/stack.sh +++ b/stack.sh @@ -530,9 +530,9 @@ if [[ -n "$LOGFILE" ]]; then # Redirect stdout/stderr to tee to write the log file exec 1> >( awk ' { - cmd ="date +\"%Y-%m-%d %H:%M:%S \"" + cmd ="date +\"%Y-%m-%d %H:%M:%S.%3N | \"" cmd | getline now - close("date +\"%Y-%m-%d %H:%M:%S \"") + close("date +\"%Y-%m-%d %H:%M:%S.%3N | \"") sub(/^/, now) print fflush() From 4f1fee6eae300a3384900df06ebc857e95854eb0 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 24 Feb 2014 14:24:13 +1100 Subject: [PATCH 0498/4438] Fix missing libffi-devel for python-glanceclient python-glanceclient is failing to install on rhel6 with a dependency chain from pip as cryptography>=0.2.1 (from pyOpenSSL>=0.11->python-glanceclient==0.12.0.56.gb8a850c) cryptography requires libffi-devel to build. I'm not sure what changed, but remove it from "testonly" so it is always installed. However, RHEL6 includes this in the optional repo, so we enable this repo in the fixup script. 
Change-Id: I9da0e91b75f41578861ee9685b8c7e91dd12dae7 --- files/apts/glance | 2 +- files/rpms/glance | 2 +- tools/fixup_stuff.sh | 4 ++++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/files/apts/glance b/files/apts/glance index 22787bc5a2..6dc878e4de 100644 --- a/files/apts/glance +++ b/files/apts/glance @@ -1,5 +1,5 @@ gcc -libffi-dev # testonly +libffi-dev libmysqlclient-dev # testonly libpq-dev # testonly libssl-dev # testonly diff --git a/files/rpms/glance b/files/rpms/glance index 785ce25df5..25c5d3902b 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -1,5 +1,5 @@ gcc -libffi-devel # testonly +libffi-devel libxml2-devel # testonly libxslt-devel # testonly mysql-devel # testonly diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 47b0cd10cd..048024a325 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -98,6 +98,10 @@ if [[ $DISTRO =~ (rhel6) ]]; then sudo setenforce 0 fi + # make sure we have the "optional" repo enabled; it provides some + # packages like libffi-devel for example + sudo yum-config-manager --enable rhel-6-server-optional-rpms + # If the ``dbus`` package was installed by DevStack dependencies the # uuid may not be generated because the service was never started (PR#598200), # causing Nova to stop later on complaining that ``/var/lib/dbus/machine-id`` From 5f90fc06f5cd3138de112eddf1b04fe1db56d226 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 24 Feb 2014 15:40:42 +1100 Subject: [PATCH 0499/4438] Fix permissions for tempest.conf The current script uses 'sudo' to copy tempest.conf.sample and thus the .conf file gets owned by root. It then makes the permissions 644, meaning that when the 'stack' user does the iniset() calls, it doesn't have permisson on the .conf file. Since the dir has been chowned to the stack user, it seems safe to just copy the sample file in without sudo. In addition, I moved the $TEMPEST_CONFIG_DIR creation closer to the copy to make it clearer what's going on. 
Seems to be related to dc4dc7f03335e26ea3d86b6184f0475cc5f3d51b Fixes bug: #1284378 Change-Id: I103b4e90cbcfa693c9cef319f4135868a1b83de3 --- lib/tempest | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/lib/tempest b/lib/tempest index 596750b32f..83ce5d2e2a 100644 --- a/lib/tempest +++ b/lib/tempest @@ -87,11 +87,6 @@ function configure_tempest() { local boto_instance_type="m1.tiny" local ssh_connect_method="fixed" - if [[ ! -d $TEMPEST_CONFIG_DIR ]]; then - sudo mkdir -p $TEMPEST_CONFIG_DIR - fi - sudo chown $STACK_USER $TEMPEST_CONFIG_DIR - # TODO(afazekas): # sudo python setup.py deploy @@ -142,8 +137,12 @@ function configure_tempest() { # Create tempest.conf from tempest.conf.sample # copy every time, because the image UUIDS are going to change - sudo cp $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG - sudo chmod 644 $TEMPEST_CONFIG + if [[ ! -d $TEMPEST_CONFIG_DIR ]]; then + sudo mkdir -p $TEMPEST_CONFIG_DIR + fi + sudo chown $STACK_USER $TEMPEST_CONFIG_DIR + cp $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG + chmod 644 $TEMPEST_CONFIG password=${ADMIN_PASSWORD:-secrete} From 6857dbb2b4fb40a2ed3362ba46f7b130a85b2de1 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Tue, 25 Feb 2014 11:02:44 +0400 Subject: [PATCH 0500/4438] Savanna: use heat infra engine if available In Savanna we have two provisioning engines: * "savanna" that directly work with nova/neutron/glance/cinder/etc and we'd like to deprecate it early in Juno release cycle, but it's still useful due to the fact that it could work ok w/o Heat; * "heat" engine uses Heat for provisioning of cluster resources, it's currently under active development and we're ready to make it default for OpenStack installations with enabled Heat. 
Change-Id: I937337b3921e9e51768a118fb4b6bd95962622bd --- lib/savanna | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/savanna b/lib/savanna index 954f0e711e..9feff236bc 100644 --- a/lib/savanna +++ b/lib/savanna @@ -135,6 +135,12 @@ function configure_savanna() { iniset $SAVANNA_CONF_FILE DEFAULT use_floating_ips true fi + if is_service_enabled heat; then + iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine heat + else + iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine savanna + fi + iniset $SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG recreate_database savanna utf8 From 45917cc4d941a530d75a84fa4dff738fe87f928b Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 24 Feb 2014 16:09:14 -0500 Subject: [PATCH 0501/4438] xtrace less we are xtrace happy, however that's just generating bulk in log files that are mostly ignorable. For the basically bullet proof functions we should not xtrace. Change-Id: Iab4e6d270c1546e0db2a06395cefcdf7f7929c3c --- functions-common | 85 ++++++++++++++++++++++++++++++++++++++++-------- stack.sh | 1 + 2 files changed, 72 insertions(+), 14 deletions(-) diff --git a/functions-common b/functions-common index d92e39cd91..79003fcfaf 100644 --- a/functions-common +++ b/functions-common @@ -39,59 +39,76 @@ set +o xtrace # Append a new option in an ini file without replacing the old value # iniadd config-file section option value1 value2 value3 ... 
function iniadd() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 shift 3 local values="$(iniget_multiline $file $section $option) $@" iniset_multiline $file $section $option $values + $xtrace } # Comment an option in an INI file # inicomment config-file section option function inicomment() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" + $xtrace } # Get an option from an INI file # iniget config-file section option function iniget() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 local line line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") echo ${line#*=} + $xtrace } # Get a multiple line option from an INI file # iniget_multiline config-file section option function iniget_multiline() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 local values values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file") echo ${values} + $xtrace } # Determinate is the given option present in the INI file # ini_has_option config-file section option function ini_has_option() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 local line line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + $xtrace [ -n "$line" ] } # Set an option in an INI file # iniset config-file section option value function iniset() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 @@ -113,11 +130,14 @@ $option = $value # Replace it sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" fi + $xtrace } # Set a multiple line option in an INI file # iniset_multiline config-file 
section option value1 value2 valu3 ... function iniset_multiline() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 @@ -142,15 +162,19 @@ function iniset_multiline() { $option = $v " "$file" done + $xtrace } # Uncomment an option in an INI file # iniuncomment config-file section option function iniuncomment() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file" + $xtrace } # Normalize config values to True or False @@ -158,6 +182,8 @@ function iniuncomment() { # Accepts as True: 1 yes Yes YES true True TRUE # VAR=$(trueorfalse default-value test-value) function trueorfalse() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local default=$1 local testval=$2 @@ -165,6 +191,7 @@ function trueorfalse() { [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; } [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; } echo "$default" + $xtrace } @@ -675,9 +702,14 @@ function _get_package_dir() { # Uses globals ``OFFLINE``, ``*_proxy`` # apt_get operation package [package ...] function apt_get() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace + [[ "$OFFLINE" = "True" || -z "$@" ]] && return local sudo="sudo" [[ "$(id -u)" = "0" ]] && sudo="env" + + $xtrace $sudo DEBIAN_FRONTEND=noninteractive \ http_proxy=$http_proxy https_proxy=$https_proxy \ no_proxy=$no_proxy \ @@ -695,6 +727,8 @@ function apt_get() { # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection # of the package to the distros listed. The distro names are case insensitive. 
function get_packages() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local services=$@ local package_dir=$(_get_package_dir) local file_to_parse @@ -706,6 +740,7 @@ function get_packages() { fi if [[ -z "$DISTRO" ]]; then GetDistro + echo "Found Distro $DISTRO" fi for service in ${services//,/ }; do # Allow individual services to specify dependencies @@ -797,23 +832,30 @@ function get_packages() { done IFS=$OIFS done + $xtrace } # Distro-agnostic package installer # install_package package [package ...] function install_package() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace if is_ubuntu; then # if there are transient errors pulling the updates, that's fine. It may # be secondary repositories that we don't really care about. [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update || /bin/true NO_UPDATE_REPOS=True + $xtrace apt_get install "$@" elif is_fedora; then + $xtrace yum_install "$@" elif is_suse; then + $xtrace zypper_install "$@" else + $xtrace exit_distro_not_supported "installing packages" fi } @@ -1092,7 +1134,13 @@ function get_python_exec_prefix() { # ``TRACK_DEPENDS``, ``*_proxy`` # pip_install package [package ...] function pip_install { - [[ "$OFFLINE" = "True" || -z "$@" ]] && return + local xtrace=$(set +o | grep xtrace) + set +o xtrace + if [[ "$OFFLINE" = "True" || -z "$@" ]]; then + $xtrace + return + fi + if [[ -z "$os_PACKAGE" ]]; then GetOSVersion fi @@ -1121,6 +1169,7 @@ function pip_install { # this problem. See https://github.com/pypa/pip/issues/709 local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX) + $xtrace $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \ HTTP_PROXY=$http_proxy \ HTTPS_PROXY=$https_proxy \ @@ -1235,32 +1284,36 @@ function enable_service() { # Uses global ``ENABLED_SERVICES`` # is_service_enabled service [service ...] 
function is_service_enabled() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace + local enabled=1 services=$@ for service in ${services}; do - [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 + [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && enabled=0 # Look for top-level 'enabled' function for this service if type is_${service}_enabled >/dev/null 2>&1; then # A function exists for this service, use it is_${service}_enabled - return $? + enabled=$? fi # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled() # are implemented - [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0 - [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0 - [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0 - [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0 - [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 - [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && return 0 - [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 - [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0 - [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0 - [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0 + [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && enabled=0 + [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0 + [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && enabled=0 + [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && enabled=0 + [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && enabled=0 + [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && enabled=0 + [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && enabled=0 + [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && enabled=0 + [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && 
enabled=0 + [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && enabled=0 done - return 1 + $xtrace + return $enabled } # Toggle enable/disable_service for services that must run exclusive of each other @@ -1286,6 +1339,8 @@ function use_exclusive_service { # Only run the command if the target file (the last arg) is not on an # NFS filesystem. function _safe_permission_operation() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local args=( $@ ) local last local sudo_cmd @@ -1299,6 +1354,7 @@ function _safe_permission_operation() { fi if is_nfs_directory "$dir_to_check" ; then + $xtrace return 0 fi @@ -1308,6 +1364,7 @@ function _safe_permission_operation() { sudo_cmd="sudo" fi + $xtrace $sudo_cmd $@ } diff --git a/stack.sh b/stack.sh index ce19b8fc5c..0fdac3394a 100755 --- a/stack.sh +++ b/stack.sh @@ -529,6 +529,7 @@ if [[ -n "$LOGFILE" ]]; then if [[ "$VERBOSE" == "True" ]]; then # Redirect stdout/stderr to tee to write the log file exec 1> >( awk ' + /((set \+o$)|xtrace)/ { next } { cmd ="date +\"%Y-%m-%d %H:%M:%S \"" cmd | getline now From dd029da5b9b5600b8f6893247645db4fb0b95efe Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Mon, 24 Feb 2014 18:09:10 +0000 Subject: [PATCH 0502/4438] Create stack_domain_admin user Create an additional service user for Heat, which is a domain admin for the stack_user_domain - this is necessary since the normal service user cannot manage the projects/users in the stack_user_domain when keystone is configured to use the v3cloudsample policy (such as in gate integration tests). 
Change-Id: If59c11a74145b9bd02f78a7e0882afe1b0a72e40 --- lib/heat | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/heat b/lib/heat index af10fa6f1d..1b6dc86989 100644 --- a/lib/heat +++ b/lib/heat @@ -207,6 +207,16 @@ function create_heat_accounts() { --description "Owns users and projects created by heat" \ | grep ' id ' | get_field 2) iniset $HEAT_CONF DEFAULT stack_user_domain ${D_ID} + + openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \ + --os-identity-api-version=3 user create --password $SERVICE_PASSWORD \ + --domain $D_ID heat_domain_admin \ + --description "Manages users and projects created by heat" + openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \ + --os-identity-api-version=3 role add \ + --user ${U_ID} --domain ${D_ID} admin + iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin + iniset $HEAT_CONF DEFAULT stack_domain_admin_password $SERVICE_PASSWORD } # Restore xtrace From 78096b5073c70ef2c1f0626c802e095cd288c097 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 25 Feb 2014 10:23:04 -0500 Subject: [PATCH 0503/4438] remove sysstat & pidstat dstat is far cleaner for getting results out of the environment, and covers the bulk of our use cases for sysstat and pidstat with a much better ui. devstack is allowed to be opinionated, so become opinionated here. 
Change-Id: I21ec96339dcd704098512fdafd896738f352962d --- files/apts/sysstat | 1 - files/rpms-suse/sysstat | 1 - files/rpms/sysstat | 1 - stack.sh | 33 +--------------- tools/sar_filter.py | 86 ----------------------------------------- 5 files changed, 2 insertions(+), 120 deletions(-) delete mode 100644 files/apts/sysstat delete mode 100644 files/rpms-suse/sysstat delete mode 100644 files/rpms/sysstat delete mode 100755 tools/sar_filter.py diff --git a/files/apts/sysstat b/files/apts/sysstat deleted file mode 100644 index ea0c342d91..0000000000 --- a/files/apts/sysstat +++ /dev/null @@ -1 +0,0 @@ -sysstat diff --git a/files/rpms-suse/sysstat b/files/rpms-suse/sysstat deleted file mode 100644 index ea0c342d91..0000000000 --- a/files/rpms-suse/sysstat +++ /dev/null @@ -1 +0,0 @@ -sysstat diff --git a/files/rpms/sysstat b/files/rpms/sysstat deleted file mode 100644 index ea0c342d91..0000000000 --- a/files/rpms/sysstat +++ /dev/null @@ -1 +0,0 @@ -sysstat diff --git a/stack.sh b/stack.sh index 9f08e0f017..1d281587b1 100755 --- a/stack.sh +++ b/stack.sh @@ -294,15 +294,9 @@ SYSLOG=`trueorfalse False $SYSLOG` SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP} SYSLOG_PORT=${SYSLOG_PORT:-516} -# Enable sysstat logging -SYSSTAT_FILE=${SYSSTAT_FILE:-"sysstat.dat"} -SYSSTAT_INTERVAL=${SYSSTAT_INTERVAL:-"1"} - +# for DSTAT logging DSTAT_FILE=${DSTAT_FILE:-"dstat.txt"} -PIDSTAT_FILE=${PIDSTAT_FILE:-"pidstat.txt"} -PIDSTAT_INTERVAL=${PIDSTAT_INTERVAL:-"5"} - # Use color for logging output (only available if syslog is not used) LOG_COLOR=`trueorfalse True $LOG_COLOR` @@ -862,23 +856,9 @@ fi # Initialize the directory for service status check init_service_check - -# Sysstat and friends +# Dstat # ------- -# If enabled, systat has to start early to track OpenStack service startup. 
-# what we want to measure -# -u : cpu statitics -# -q : load -# -b : io load rates -# -w : process creation and context switch rates -SYSSTAT_OPTS="-u -q -b -w" -if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it sysstat "cd $TOP_DIR; ./tools/sar_filter.py $SYSSTAT_OPTS -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" -else - screen_it sysstat "./tools/sar_filter.py $SYSSTAT_OPTS $SYSSTAT_INTERVAL" -fi - # A better kind of sysstat, with the top process per time slice DSTAT_OPTS="-tcndylp --top-cpu-adv" if [[ -n ${SCREEN_LOGDIR} ]]; then @@ -887,15 +867,6 @@ else screen_it dstat "dstat $DSTAT_OPTS" fi -# Per-process stats -PIDSTAT_OPTS="-l -p ALL -T ALL" -if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it pidstat "cd $TOP_DIR; pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL > $SCREEN_LOGDIR/$PIDSTAT_FILE" -else - screen_it pidstat "pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL" -fi - - # Start Services # ============== diff --git a/tools/sar_filter.py b/tools/sar_filter.py deleted file mode 100755 index 24ef0e476c..0000000000 --- a/tools/sar_filter.py +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2014 Samsung Electronics Corp. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import re -import subprocess -import sys - - -def is_data_line(line): - timestamp, data = parse_line(line) - return re.search('\d\.d', data) - - -def parse_line(line): - m = re.search('(\d\d:\d\d:\d\d( \w\w)?)(\s+((\S+)\s*)+)', line) - if m: - date = m.group(1) - data = m.group(3).rstrip() - return date, data - else: - return None, None - - -process = subprocess.Popen( - "sar %s" % " ".join(sys.argv[1:]), - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - -# Poll process for new output until finished - -start_time = "" -header = "" -data_line = "" -printed_header = False -current_ts = None - -# print out the first sysstat line regardless -print process.stdout.readline() - -while True: - nextline = process.stdout.readline() - if nextline == '' and process.poll() is not None: - break - - date, data = parse_line(nextline) - # stop until we get to the first set of real lines - if not date: - continue - - # now we eat the header lines, and only print out the header - # if we've never seen them before - if not start_time: - start_time = date - header += "%s %s" % (date, data) - elif date == start_time: - header += " %s" % data - elif not printed_header: - printed_header = True - print header - - # now we know this is a data line, printing out if the timestamp - # has changed, and stacking up otherwise. - nextline = process.stdout.readline() - date, data = parse_line(nextline) - if date != current_ts: - current_ts = date - print data_line - data_line = "%s %s" % (date, data) - else: - data_line += " %s" % data - - sys.stdout.flush() From b8e250232ec55b946d2fd7e4237f12632408bdcc Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 21 Feb 2014 16:14:29 +1100 Subject: [PATCH 0504/4438] Add end-of-file checks to bash8 Add two end-of-file checks to bash8. Firstly, alert if heredoc hasn't finished. Some heredocs were done like: --- sudo bash -c "cat < foo ... 
EOF" --- (A better way to do this is "cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla + cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla + cat < $rules_dir/50-libvirt-$STACK_USER.rules + cat < Date: Wed, 26 Feb 2014 11:16:09 +1100 Subject: [PATCH 0505/4438] Run yum repolist commands as root Otherwise you get yum errors like [1] when you run stack.sh as !root. The solution is to run yum commands as root so it can access the right certs [1] https://access.redhat.com/site/solutions/312413 Change-Id: I54b0df13508c50aba67e23da11953c536933917a --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 9cdf2648f1..b3c507b600 100755 --- a/stack.sh +++ b/stack.sh @@ -181,7 +181,7 @@ if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then # Installing Open vSwitch on RHEL6 requires enabling the RDO repo. RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack-havana/rdo-release-havana.rpm"} RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-havana"} - if ! yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then + if ! sudo yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then echo "RDO repo not detected; installing" yum_install $RHEL6_RDO_REPO_RPM || \ die $LINENO "Error installing RDO repo, cannot continue" @@ -189,7 +189,7 @@ if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then # RHEL6 requires EPEL for many Open Stack dependencies RHEL6_EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"} - if ! yum repolist enabled epel | grep -q 'epel'; then + if ! 
sudo yum repolist enabled epel | grep -q 'epel'; then echo "EPEL not detected; installing" yum_install ${RHEL6_EPEL_RPM} || \ die $LINENO "Error installing EPEL repo, cannot continue" From 3e37326a3566ac38ea7ccf053fc183b7a8fccc08 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 26 Feb 2014 13:29:31 +1100 Subject: [PATCH 0506/4438] Move enablement of rhel6-optional repo earlier Change 4f1fee6eae300a3384900df06ebc857e95854eb0 added the RHEL6 optional repo in fixup_stuff.sh, but it turns out that doesn't get run until after the package prerequisites phase. Move this into stack.sh with the RDO repo setup. Change-Id: Iae0df85fa94c6c1b6f497dd29fda90d03b903a41 --- stack.sh | 4 ++++ tools/fixup_stuff.sh | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index 9cdf2648f1..217afbc2e3 100755 --- a/stack.sh +++ b/stack.sh @@ -194,6 +194,10 @@ if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then yum_install ${RHEL6_EPEL_RPM} || \ die $LINENO "Error installing EPEL repo, cannot continue" fi + + # ... 
and also optional to be enabled + sudo yum-config-manager --enable rhel-6-server-optional-rpms + fi diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 048024a325..47b0cd10cd 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -98,10 +98,6 @@ if [[ $DISTRO =~ (rhel6) ]]; then sudo setenforce 0 fi - # make sure we have the "optional" repo enabled; it provides some - # packages like libffi-devel for example - sudo yum-config-manager --enable rhel-6-server-optional-rpms - # If the ``dbus`` package was installed by DevStack dependencies the # uuid may not be generated because the service was never started (PR#598200), # causing Nova to stop later on complaining that ``/var/lib/dbus/machine-id`` From 201850120bec762347b80b22b5c60df43a262c6e Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Mon, 3 Feb 2014 12:14:08 -0500 Subject: [PATCH 0507/4438] Make python-guestfs NOPRIME & install for libvirt The libguestfs dependency tree includes a number of packages that we may not want or work everywhere, such as fuse. 
Now python-(lib)guestfs will install from lib/nova_plugins/hypervisor-libvirt Change-Id: I6c3a614010ee8d65813eec66a56680def622514c --- files/apts/n-cpu | 2 +- files/rpms/n-cpu | 2 +- lib/nova_plugins/hypervisor-libvirt | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/files/apts/n-cpu b/files/apts/n-cpu index b287107256..a82304dfe2 100644 --- a/files/apts/n-cpu +++ b/files/apts/n-cpu @@ -5,4 +5,4 @@ open-iscsi-utils # Deprecated since quantal dist:precise genisoimage sysfsutils sg3-utils -python-guestfs +python-guestfs # NOPRIME diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index e4fdaf4eda..32b1546c39 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -4,4 +4,4 @@ lvm2 genisoimage sysfsutils sg3_utils -python-libguestfs +python-libguestfs # NOPRIME diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 415244ffae..7f0880494e 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -140,10 +140,12 @@ function install_nova_hypervisor() { install_package kvm install_package libvirt-bin install_package python-libvirt + install_package python-guestfs elif is_fedora || is_suse; then install_package kvm install_package libvirt install_package libvirt-python + install_package python-libguestfs fi # Install and configure **LXC** if specified. LXC is another approach to From 06ba5193bebe27b2d7ead2d31ed9171885c6a5d8 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Wed, 26 Feb 2014 13:46:56 +1000 Subject: [PATCH 0508/4438] Insecure check if keystone is up If we start keystone with an SSL endpoint then the curl check to see if it is running will fail because it cannot create a secure connection. This check can be done insecurely as all we care about is that the service has started. 
Change-Id: I826753d4d46e9956f443110029346bc70282951a --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index cebb4d3522..73af1d356d 100644 --- a/lib/keystone +++ b/lib/keystone @@ -424,7 +424,7 @@ function start_keystone() { fi echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -k -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then die $LINENO "keystone did not start" fi From 3b57829ece7aa231770b640afd6da961dae2fc1c Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 26 Feb 2014 14:52:02 +1100 Subject: [PATCH 0509/4438] Don't use --skip-redirect for cinder restart on rhel6 RHEL6 doesn't support this flag so the restart fails. Not exactly sure why it is required, seems unchagned from the initial commit 67787e6b4c6f31388cbee6d83b67371b31c443d4 (found running stack.sh with -e per [1]) [1] https://review.openstack.org/#/c/71996/ Change-Id: Ib34c3663409d7b96b932286cb5a6974e940075d3 --- lib/cinder | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index c8c90c098d..e8f30b683c 100644 --- a/lib/cinder +++ b/lib/cinder @@ -496,8 +496,12 @@ function start_cinder() { sudo stop tgt || true sudo start tgt elif is_fedora; then - # bypass redirection to systemctl during restart - sudo /sbin/service --skip-redirect tgtd restart + if [[ $DISTRO =~ (rhel6) ]]; then + sudo /sbin/service tgtd restart + else + # bypass redirection to systemctl during restart + sudo /sbin/service --skip-redirect tgtd restart + fi elif is_suse; then restart_service tgtd else From 1755f689e807cd73b7bb2c67ac0531afbc8c6448 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 26 Feb 2014 13:08:00 -0600 
Subject: [PATCH 0510/4438] Fix heat role create error https://review.openstack.org/#/c/76036/ changed the user creat commands, missed the argument to --user Change-Id: Iaf10ef80a2fb0227dd66a314e7ec253dfb4dc4fe --- lib/heat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index 1b6dc86989..972c35ce72 100644 --- a/lib/heat +++ b/lib/heat @@ -214,7 +214,7 @@ function create_heat_accounts() { --description "Manages users and projects created by heat" openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \ --os-identity-api-version=3 role add \ - --user ${U_ID} --domain ${D_ID} admin + --user heat_domain_admin --domain ${D_ID} admin iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin iniset $HEAT_CONF DEFAULT stack_domain_admin_password $SERVICE_PASSWORD } From a25a6f6d80cb844f13540fecf616b289c42e3ebe Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 24 Feb 2014 16:03:41 -0600 Subject: [PATCH 0511/4438] Unbuffer log output * Force-flush log output so we don't lose log output in certain error cases. 
* Slow down exit paths: add sleep to die(), wait until last moment to kill child processes (including the awk log output filter) Change-Id: I1620fd33b89b237d9c2bb6206f3de2c81719f676 --- functions-common | 2 ++ stack.sh | 26 +++++++++++++++----------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/functions-common b/functions-common index 79003fcfaf..4bc3bbaac5 100644 --- a/functions-common +++ b/functions-common @@ -222,6 +222,8 @@ function die() { fi backtrace 2 err $line "$*" + # Give buffers a second to flush + sleep 1 exit $exitcode } diff --git a/stack.sh b/stack.sh index 22a418f306..c95199769f 100755 --- a/stack.sh +++ b/stack.sh @@ -522,7 +522,7 @@ if [[ -n "$LOGFILE" ]]; then exec 3>&1 if [[ "$VERBOSE" == "True" ]]; then # Redirect stdout/stderr to tee to write the log file - exec 1> >( awk ' + exec 1> >( awk -v logfile=${LOGFILE} ' /((set \+o$)|xtrace)/ { next } { cmd ="date +\"%Y-%m-%d %H:%M:%S.%3N | \"" @@ -530,8 +530,9 @@ if [[ -n "$LOGFILE" ]]; then close("date +\"%Y-%m-%d %H:%M:%S.%3N | \"") sub(/^/, now) print - fflush() - }' | tee "${LOGFILE}" ) 2>&1 + print > logfile + fflush("") + }' ) 2>&1 # Set up a second fd for output exec 6> >( tee "${SUMFILE}" ) else @@ -579,21 +580,24 @@ fi # ----------------------- # Kill background processes on exit -trap clean EXIT -clean() { +trap exit_trap EXIT +function exit_trap { local r=$? - kill >/dev/null 2>&1 $(jobs -p) + echo "exit_trap called, cleaning up child processes" + kill 2>&1 $(jobs -p) exit $r } - # Exit on any errors so that errors don't compound -trap failed ERR -failed() { +trap err_trap ERR +function err_trap { local r=$? 
- kill >/dev/null 2>&1 $(jobs -p) set +o xtrace - [ -n "$LOGFILE" ] && echo "${0##*/} failed: full log in $LOGFILE" + if [[ -n "$LOGFILE" ]]; then + echo "${0##*/} failed: full log in $LOGFILE" + else + echo "${0##*/} failed" + fi exit $r } From 09bd7c8fd5a662ef697eb61638efbe862a4875a6 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 3 Feb 2014 08:35:26 +0900 Subject: [PATCH 0512/4438] enable -o errexit devstack should run under -o errexit to ensure that we fail early when something has gone wrong, otherwise determination of the root failure location is often quite challenging. this clears all the normal use cases for devstack, there could be tests which now die early, which we're going to have to check for later. Change-Id: Ibd828c4f4fd95a60d3918d3d7ae90e10649479ab --- functions-common | 3 ++- stack.sh | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 4bc3bbaac5..3e29e8c7de 100644 --- a/functions-common +++ b/functions-common @@ -1094,7 +1094,8 @@ function service_check() { fi # Check if there is any falure flag file under $SERVICE_DIR/$SCREEN_NAME - failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null` + # make this -o errexit safe + failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null || /bin/true` for service in $failures; do service=`basename $service` diff --git a/stack.sh b/stack.sh index c95199769f..d43a73a889 100755 --- a/stack.sh +++ b/stack.sh @@ -601,6 +601,9 @@ function err_trap { exit $r } + +set -o errexit + # Print the commands being run so that we can see the command that triggers # an error. It is also useful for following along as the install occurs. 
set -o xtrace From a42650fb7e4d3fc8853f04d84109199fa1d9f5e4 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 27 Feb 2014 13:08:30 +0100 Subject: [PATCH 0513/4438] Fix libvirt polkit settings After the https://review.openstack.org/#/c/75314 merged the /etc/polkit-1/rules.d/50-libvirt-stack.rules files contains subject.user == '"stack"' instead of subject.user == 'stack'. Change-Id: I09f252b2d0e53f012facb9f7eaa21c1e1bdf492b --- lib/nova_plugins/hypervisor-libvirt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index a550600363..dc999edfe9 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -87,7 +87,7 @@ EOF cat < Date: Thu, 27 Feb 2014 11:13:36 -0600 Subject: [PATCH 0514/4438] Fix exit_trap() error if no child processes Bug-Id: 1285776 Change-Id: Iad7a9f2c03cc39159beda55345f232cefed10520 --- stack.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 4333fb2c7e..f4342dd206 100755 --- a/stack.sh +++ b/stack.sh @@ -587,8 +587,11 @@ fi trap exit_trap EXIT function exit_trap { local r=$? - echo "exit_trap called, cleaning up child processes" - kill 2>&1 $(jobs -p) + jobs=$(jobs -p) + if [[ -n $jobs ]]; then + echo "exit_trap: cleaning up child processes" + kill 2>&1 $jobs + fi exit $r } From 83b6c99b503dced1e92761e1de8ceaf23a396453 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 27 Feb 2014 12:41:28 -0600 Subject: [PATCH 0515/4438] Handle non-zero exit code from git diff The check for a changed repo in setup_develop() 'git diff --exit-code' returns a status of 1 when the repo has changes; trap that so errexit does not abort the script. 
Bug-Id: 1285780 Change-Id: Ic97e68348f46245b271567893b447fcedbd7bd6e --- functions-common | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/functions-common b/functions-common index 8e6b2b1895..0d85068a2f 100644 --- a/functions-common +++ b/functions-common @@ -1223,14 +1223,12 @@ function pip_install { function setup_develop() { local project_dir=$1 - echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" - # Don't update repo if local changes exist # Don't use buggy "git diff --quiet" - (cd $project_dir && git diff --exit-code >/dev/null) - local update_requirements=$? + # ``errexit`` requires us to trap the exit code when the repo is changed + local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed") - if [ $update_requirements -eq 0 ]; then + if [[ $update_requirements = "changed" ]]; then (cd $REQUIREMENTS_DIR; \ $SUDO_CMD python update.py $project_dir) fi @@ -1246,7 +1244,7 @@ function setup_develop() { # a variable that tells us whether or not we should UNDO the requirements # changes (this will be set to False in the OpenStack ci gate) if [ $UNDO_REQUIREMENTS = "True" ]; then - if [ $update_requirements -eq 0 ]; then + if [[ $update_requirements = "changed" ]]; then (cd $project_dir && git reset --hard) fi fi From 657ce7fa213b680904c07f09029467d8a195761d Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Thu, 27 Feb 2014 10:50:38 -0800 Subject: [PATCH 0516/4438] Stop trying to create the 'ironic' user twice After 09bd7c8fd5a6 landed, a conflict between lib/ironic and extras.d/50-ironic.sh was exposed, breaking Ironic's check and gate tests. This resolves that conflict by only creating the 'ironic' user once. 
Change-Id: Ic41517f0977c84a82f92f58565aaee6b5cc7eb3e --- lib/ironic | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/ironic b/lib/ironic index 607b13125a..f4454749dc 100644 --- a/lib/ironic +++ b/lib/ironic @@ -186,9 +186,6 @@ function init_ironic() { $IRONIC_BIN_DIR/ironic-dbsync create_ironic_cache_dir - - # Create keystone artifacts for Ironic. - create_ironic_accounts } # start_ironic() - Start running processes, including screen From aee18c749b0e3a1a3a6907a33db76ae83b8d41d9 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 21 Feb 2014 15:35:08 +1100 Subject: [PATCH 0517/4438] Enforce function declaration format in bash8 Check that function calls look like ^function foo {$ in bash8, and fix all existing failures of that check. Add a note to HACKING.rst Change-Id: Ic19eecb39e0b20273d1bcd551a42fe400d54e938 --- HACKING.rst | 2 + driver_certs/cinder_driver_cert.sh | 2 +- exercises/aggregates.sh | 2 +- exercises/client-args.sh | 2 +- exercises/client-env.sh | 2 +- exercises/neutron-adv-test.sh | 6 +- functions | 22 ++-- functions-common | 122 ++++++++++---------- lib/apache | 14 +-- lib/baremetal | 22 ++-- lib/ceilometer | 18 +-- lib/cinder | 28 ++--- lib/cinder_plugins/XenAPINFS | 2 +- lib/cinder_plugins/glusterfs | 2 +- lib/cinder_plugins/nfs | 2 +- lib/cinder_plugins/sheepdog | 2 +- lib/cinder_plugins/solidfire | 2 +- lib/cinder_plugins/vsphere | 2 +- lib/config | 8 +- lib/gantt | 14 +-- lib/glance | 16 +-- lib/heat | 18 +-- lib/horizon | 14 +-- lib/infra | 4 +- lib/ironic | 26 ++--- lib/keystone | 22 ++-- lib/ldap | 14 +-- lib/marconi | 20 ++-- lib/neutron | 85 +++++++------- lib/neutron_plugins/bigswitch_floodlight | 22 ++-- lib/neutron_plugins/brocade | 22 ++-- lib/neutron_plugins/cisco | 42 +++---- lib/neutron_plugins/embrane | 6 +- lib/neutron_plugins/linuxbridge | 6 +- lib/neutron_plugins/linuxbridge_agent | 18 +-- lib/neutron_plugins/midonet | 24 ++-- lib/neutron_plugins/ml2 | 8 +- lib/neutron_plugins/nec | 26 ++--- 
lib/neutron_plugins/openvswitch | 6 +- lib/neutron_plugins/openvswitch_agent | 16 +-- lib/neutron_plugins/ovs_base | 16 +-- lib/neutron_plugins/plumgrid | 16 +-- lib/neutron_plugins/ryu | 22 ++-- lib/neutron_plugins/services/firewall | 6 +- lib/neutron_plugins/services/loadbalancer | 8 +- lib/neutron_plugins/services/metering | 6 +- lib/neutron_plugins/services/vpn | 6 +- lib/neutron_plugins/vmware_nsx | 26 ++--- lib/neutron_thirdparty/bigswitch_floodlight | 12 +- lib/neutron_thirdparty/midonet | 12 +- lib/neutron_thirdparty/ryu | 12 +- lib/neutron_thirdparty/trema | 16 +-- lib/neutron_thirdparty/vmware_nsx | 12 +- lib/nova | 34 +++--- lib/nova_plugins/hypervisor-baremetal | 10 +- lib/nova_plugins/hypervisor-docker | 10 +- lib/nova_plugins/hypervisor-fake | 10 +- lib/nova_plugins/hypervisor-libvirt | 10 +- lib/nova_plugins/hypervisor-openvz | 10 +- lib/nova_plugins/hypervisor-vsphere | 10 +- lib/nova_plugins/hypervisor-xenserver | 10 +- lib/oslo | 4 +- lib/rpc_backend | 10 +- lib/savanna | 12 +- lib/savanna-dashboard | 8 +- lib/stackforge | 4 +- lib/swift | 24 ++-- lib/tempest | 6 +- lib/template | 12 +- lib/tls | 20 ++-- lib/trove | 20 ++-- stack.sh | 6 +- tests/functions.sh | 6 +- tests/test_config.sh | 6 +- tools/bash8.py | 16 +++ tools/build_pxe_env.sh | 2 +- tools/build_ramdisk.sh | 4 +- tools/build_uec_ramdisk.sh | 2 +- tools/build_usb_boot.sh | 2 +- tools/copy_dev_environment_to_uec.sh | 2 +- tools/create_userrc.sh | 11 +- tools/fixup_stuff.sh | 2 +- tools/get_uec_image.sh | 4 +- tools/info.sh | 2 +- tools/install_openvpn.sh | 8 +- tools/install_pip.sh | 6 +- tools/jenkins/build_configuration.sh | 2 +- tools/jenkins/configurations/kvm.sh | 2 +- tools/jenkins/configurations/xs.sh | 2 +- tools/jenkins/run_test.sh | 2 +- tools/warm_apts_for_uec.sh | 2 +- tools/xen/build_xva.sh | 4 +- tools/xen/install_os_domU.sh | 6 +- tools/xen/prepare_guest.sh | 2 +- 94 files changed, 601 insertions(+), 585 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index 
103b579621..5c15537915 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -275,3 +275,5 @@ Variables and Functions - local variables should be lower case, global variables should be upper case - function names should_have_underscores, NotCamelCase. +- functions should be declared as per the regex ^function foo {$ + with code starting on the next line diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh index e45b7f8736..d2c636f89d 100755 --- a/driver_certs/cinder_driver_cert.sh +++ b/driver_certs/cinder_driver_cert.sh @@ -32,7 +32,7 @@ source $TOP_DIR/lib/cinder TEMPFILE=`mktemp` RECLONE=True -function log_message() { +function log_message { MESSAGE=$1 STEP_HEADER=$2 if [[ "$STEP_HEADER" = "True" ]]; then diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index d223301f35..01d548d1f2 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -57,7 +57,7 @@ AGGREGATE_NAME=test_aggregate_$RANDOM AGGREGATE2_NAME=test_aggregate_$RANDOM AGGREGATE_A_ZONE=nova -exit_if_aggregate_present() { +function exit_if_aggregate_present { aggregate_name=$1 if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then diff --git a/exercises/client-args.sh b/exercises/client-args.sh index e79774f98c..b360f1e86a 100755 --- a/exercises/client-args.sh +++ b/exercises/client-args.sh @@ -154,7 +154,7 @@ set +o xtrace # Results # ======= -function report() { +function report { if [[ -n "$2" ]]; then echo "$1: $2" fi diff --git a/exercises/client-env.sh b/exercises/client-env.sh index 6c6fe12282..d955e4d1e1 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -165,7 +165,7 @@ set +o xtrace # Results # ======= -function report() { +function report { if [[ -n "$2" ]]; then echo "$1: $2" fi diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index a9199e62a6..0a24fe9df7 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -20,7 +20,7 @@ echo 
"*********************************************************************" set -o errtrace trap failed ERR -failed() { +function failed { local r=$? set +o errtrace set +o xtrace @@ -395,7 +395,7 @@ function test_functions { # Usage and main # -------------- -usage() { +function usage { echo "$0: [-h]" echo " -h, --help Display help message" echo " -t, --tenant Create tenants" @@ -408,7 +408,7 @@ usage() { echo " -T, --test Test functions" } -main() { +function main { echo Description diff --git a/functions b/functions index 3101111c63..43639c79fb 100644 --- a/functions +++ b/functions @@ -51,7 +51,7 @@ function cleanup_tmp { # - ``GLANCE_HOSTPORT`` # # upload_image image-url glance-token -function upload_image() { +function upload_image { local image_url=$1 local token=$2 @@ -341,7 +341,7 @@ function use_database { # Wait for an HTTP server to start answering requests # wait_for_service timeout url -function wait_for_service() { +function wait_for_service { local timeout=$1 local url=$2 timeout $timeout sh -c "while ! 
curl --noproxy '*' -s $url >/dev/null; do sleep 1; done" @@ -351,7 +351,7 @@ function wait_for_service() { # ping check # Uses globals ``ENABLED_SERVICES`` # ping_check from-net ip boot-timeout expected -function ping_check() { +function ping_check { if is_service_enabled neutron; then _ping_check_neutron "$1" $2 $3 $4 return @@ -361,7 +361,7 @@ function ping_check() { # ping check for nova # Uses globals ``MULTI_HOST``, ``PRIVATE_NETWORK`` -function _ping_check_novanet() { +function _ping_check_novanet { local from_net=$1 local ip=$2 local boot_timeout=$3 @@ -386,7 +386,7 @@ function _ping_check_novanet() { } # Get ip of instance -function get_instance_ip(){ +function get_instance_ip { local vm_id=$1 local network_name=$2 local nova_result="$(nova show $vm_id)" @@ -401,7 +401,7 @@ function get_instance_ip(){ # ssh check # ssh_check net-name key-file floating-ip default-user active-timeout -function ssh_check() { +function ssh_check { if is_service_enabled neutron; then _ssh_check_neutron "$1" $2 $3 $4 $5 return @@ -409,7 +409,7 @@ function ssh_check() { _ssh_check_novanet "$1" $2 $3 $4 $5 } -function _ssh_check_novanet() { +function _ssh_check_novanet { local NET_NAME=$1 local KEY_FILE=$2 local FLOATING_IP=$3 @@ -425,7 +425,7 @@ function _ssh_check_novanet() { # Get the location of the $module-rootwrap executables, where module is cinder # or nova. # get_rootwrap_location module -function get_rootwrap_location() { +function get_rootwrap_location { local module=$1 echo "$(get_python_exec_prefix)/$module-rootwrap" @@ -434,7 +434,7 @@ function get_rootwrap_location() { # Path permissions sanity check # check_path_perm_sanity path -function check_path_perm_sanity() { +function check_path_perm_sanity { # Ensure no element of the path has 0700 permissions, which is very # likely to cause issues for daemons. 
Inspired by default 0700 # homedir permissions on RHEL and common practice of making DEST in @@ -505,7 +505,7 @@ function _vercmp_r { # The above will return "0", as the versions are equal. # # vercmp_numbers ver1 ver2 -vercmp_numbers() { +function vercmp_numbers { typeset v1=$1 v2=$2 sep typeset -a ver1 ver2 @@ -523,7 +523,7 @@ vercmp_numbers() { # Defaults are respectively 'project_name' and 'user_name' # # setup_colorized_logging something.conf SOMESECTION -function setup_colorized_logging() { +function setup_colorized_logging { local conf_file=$1 local conf_section=$2 local project_var=${3:-"project_name"} diff --git a/functions-common b/functions-common index 2248fbb610..eba4985e40 100644 --- a/functions-common +++ b/functions-common @@ -38,7 +38,7 @@ set +o xtrace # Append a new option in an ini file without replacing the old value # iniadd config-file section option value1 value2 value3 ... -function iniadd() { +function iniadd { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -52,7 +52,7 @@ function iniadd() { # Comment an option in an INI file # inicomment config-file section option -function inicomment() { +function inicomment { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -64,7 +64,7 @@ function inicomment() { # Get an option from an INI file # iniget config-file section option -function iniget() { +function iniget { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -78,7 +78,7 @@ function iniget() { # Get a multiple line option from an INI file # iniget_multiline config-file section option -function iniget_multiline() { +function iniget_multiline { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -92,7 +92,7 @@ function iniget_multiline() { # Determinate is the given option present in the INI file # ini_has_option config-file section option -function ini_has_option() { +function ini_has_option { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -106,7 
+106,7 @@ function ini_has_option() { # Set an option in an INI file # iniset config-file section option value -function iniset() { +function iniset { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -135,7 +135,7 @@ $option = $value # Set a multiple line option in an INI file # iniset_multiline config-file section option value1 value2 valu3 ... -function iniset_multiline() { +function iniset_multiline { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -167,7 +167,7 @@ $option = $v # Uncomment an option in an INI file # iniuncomment config-file section option -function iniuncomment() { +function iniuncomment { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -181,7 +181,7 @@ function iniuncomment() { # Accepts as False: 0 no No NO false False FALSE # Accepts as True: 1 yes Yes YES true True TRUE # VAR=$(trueorfalse default-value test-value) -function trueorfalse() { +function trueorfalse { local xtrace=$(set +o | grep xtrace) set +o xtrace local default=$1 @@ -213,7 +213,7 @@ function backtrace { # Prints line number and "message" then exits # die $LINENO "message" -function die() { +function die { local exitcode=$? set +o xtrace local line=$1; shift @@ -231,7 +231,7 @@ function die() { # exit code is non-zero and prints "message" and exits # NOTE: env-var is the variable name without a '$' # die_if_not_set $LINENO env-var "message" -function die_if_not_set() { +function die_if_not_set { local exitcode=$? FXTRACE=$(set +o | grep xtrace) set +o xtrace @@ -245,7 +245,7 @@ function die_if_not_set() { # Prints line number and "message" in error format # err $LINENO "message" -function err() { +function err { local exitcode=$? 
errXTRACE=$(set +o | grep xtrace) set +o xtrace @@ -262,7 +262,7 @@ function err() { # exit code is non-zero and prints "message" # NOTE: env-var is the variable name without a '$' # err_if_not_set $LINENO env-var "message" -function err_if_not_set() { +function err_if_not_set { local exitcode=$? errinsXTRACE=$(set +o | grep xtrace) set +o xtrace @@ -291,14 +291,14 @@ function exit_distro_not_supported { # Test if the named environment variable is set and not zero length # is_set env-var -function is_set() { +function is_set { local var=\$"$1" eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depends on this } # Prints line number and "message" in warning format # warn $LINENO "message" -function warn() { +function warn { local exitcode=$? errXTRACE=$(set +o | grep xtrace) set +o xtrace @@ -324,7 +324,7 @@ function warn() { # os_PACKAGE - package type # os_CODENAME - vendor's codename for release # GetOSVersion -GetOSVersion() { +function GetOSVersion { # Figure out which vendor we are if [[ -x "`which sw_vers 2>/dev/null`" ]]; then # OS/X @@ -414,7 +414,7 @@ GetOSVersion() { # Translate the OS version values into common nomenclature # Sets global ``DISTRO`` from the ``os_*`` values -function GetDistro() { +function GetDistro { GetOSVersion if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then # 'Everyone' refers to Ubuntu / Debian releases by the code name adjective @@ -491,7 +491,7 @@ function is_ubuntu { # Returns openstack release name for a given branch name # ``get_release_name_from_branch branch-name`` -function get_release_name_from_branch(){ +function get_release_name_from_branch { local branch=$1 if [[ $branch =~ "stable/" ]]; then echo ${branch#*/} @@ -577,7 +577,7 @@ function git_clone { # to timeout(1); otherwise the default value of 0 maintains the status # quo of waiting forever. 
# usage: git_timed -function git_timed() { +function git_timed { local count=0 local timeout=0 @@ -603,7 +603,7 @@ function git_timed() { # git update using reference as a branch. # git_update_branch ref -function git_update_branch() { +function git_update_branch { GIT_BRANCH=$1 @@ -615,7 +615,7 @@ function git_update_branch() { # git update using reference as a branch. # git_update_remote_branch ref -function git_update_remote_branch() { +function git_update_remote_branch { GIT_BRANCH=$1 @@ -625,7 +625,7 @@ function git_update_remote_branch() { # git update using reference as a tag. Be careful editing source at that repo # as working copy will be in a detached mode # git_update_tag ref -function git_update_tag() { +function git_update_tag { GIT_TAG=$1 @@ -641,7 +641,7 @@ function git_update_tag() { # Get the default value for HOST_IP # get_default_host_ip fixed_range floating_range host_ip_iface host_ip -function get_default_host_ip() { +function get_default_host_ip { local fixed_range=$1 local floating_range=$2 local host_ip_iface=$3 @@ -673,7 +673,7 @@ function get_default_host_ip() { # Fields are numbered starting with 1 # Reverse syntax is supported: -1 is the last field, -2 is second to last, etc. 
# get_field field-number -function get_field() { +function get_field { while read data; do if [ "$1" -lt 0 ]; then field="(\$(NF$1))" @@ -687,7 +687,7 @@ function get_field() { # Add a policy to a policy.json file # Do nothing if the policy already exists # ``policy_add policy_file policy_name policy_permissions`` -function policy_add() { +function policy_add { local policy_file=$1 local policy_name=$2 local policy_perm=$3 @@ -717,7 +717,7 @@ function policy_add() { # ================= # _get_package_dir -function _get_package_dir() { +function _get_package_dir { local pkg_dir if is_ubuntu; then pkg_dir=$FILES/apts @@ -734,7 +734,7 @@ function _get_package_dir() { # Wrapper for ``apt-get`` to set cache and proxy environment variables # Uses globals ``OFFLINE``, ``*_proxy`` # apt_get operation package [package ...] -function apt_get() { +function apt_get { local xtrace=$(set +o | grep xtrace) set +o xtrace @@ -759,7 +759,7 @@ function apt_get() { # - ``# NOPRIME`` defers installation to be performed later in `stack.sh` # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection # of the package to the distros listed. The distro names are case insensitive. -function get_packages() { +function get_packages { local xtrace=$(set +o | grep xtrace) set +o xtrace local services=$@ @@ -870,7 +870,7 @@ function get_packages() { # Distro-agnostic package installer # install_package package [package ...] -function install_package() { +function install_package { local xtrace=$(set +o | grep xtrace) set +o xtrace if is_ubuntu; then @@ -895,7 +895,7 @@ function install_package() { # Distro-agnostic function to tell if a package is installed # is_package_installed package [package ...] -function is_package_installed() { +function is_package_installed { if [[ -z "$@" ]]; then return 1 fi @@ -915,7 +915,7 @@ function is_package_installed() { # Distro-agnostic package uninstaller # uninstall_package package [package ...] 
-function uninstall_package() { +function uninstall_package { if is_ubuntu; then apt_get purge "$@" elif is_fedora; then @@ -930,7 +930,7 @@ function uninstall_package() { # Wrapper for ``yum`` to set proxy environment variables # Uses globals ``OFFLINE``, ``*_proxy`` # yum_install package [package ...] -function yum_install() { +function yum_install { [[ "$OFFLINE" = "True" ]] && return local sudo="sudo" [[ "$(id -u)" = "0" ]] && sudo="env" @@ -941,7 +941,7 @@ function yum_install() { # zypper wrapper to set arguments correctly # zypper_install package [package ...] -function zypper_install() { +function zypper_install { [[ "$OFFLINE" = "True" ]] && return local sudo="sudo" [[ "$(id -u)" = "0" ]] && sudo="env" @@ -958,7 +958,7 @@ function zypper_install() { # files to produce the same logs as screen_it(). The log filename is derived # from the service name and global-and-now-misnamed SCREEN_LOGDIR # _run_process service "command-line" -function _run_process() { +function _run_process { local service=$1 local command="$2" @@ -983,7 +983,7 @@ function _run_process() { # Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``. # This is used for ``service_check`` when all the ``screen_it`` are called finished # init_service_check -function init_service_check() { +function init_service_check { SCREEN_NAME=${SCREEN_NAME:-stack} SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} @@ -996,7 +996,7 @@ function init_service_check() { # Find out if a process exists by partial name. # is_running name -function is_running() { +function is_running { local name=$1 ps auxw | grep -v grep | grep ${name} > /dev/null RC=$? @@ -1009,7 +1009,7 @@ function is_running() { # of screen_it() without screen. 
PIDs are written to # $SERVICE_DIR/$SCREEN_NAME/$service.pid # run_process service "command-line" -function run_process() { +function run_process { local service=$1 local command="$2" @@ -1092,7 +1092,7 @@ function screen_rc { # If screen is being used kill the screen window; this will catch processes # that did not leave a PID behind # screen_stop service -function screen_stop() { +function screen_stop { SCREEN_NAME=${SCREEN_NAME:-stack} SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} USE_SCREEN=$(trueorfalse True $USE_SCREEN) @@ -1112,7 +1112,7 @@ function screen_stop() { # Helper to get the status of each running service # service_check -function service_check() { +function service_check { local service local failures SCREEN_NAME=${SCREEN_NAME:-stack} @@ -1145,7 +1145,7 @@ function service_check() { # Get the path to the pip command. # get_pip_command -function get_pip_command() { +function get_pip_command { which pip || which pip-python if [ $? -ne 0 ]; then @@ -1155,7 +1155,7 @@ function get_pip_command() { # Get the path to the direcotry where python executables are installed. # get_python_exec_prefix -function get_python_exec_prefix() { +function get_python_exec_prefix { if is_fedora || is_suse; then echo "/usr/bin" else @@ -1221,7 +1221,7 @@ function pip_install { # # Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS`` # setup_develop directory -function setup_develop() { +function setup_develop { local project_dir=$1 echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" @@ -1257,7 +1257,7 @@ function setup_develop() { # using pip before running `setup.py develop` # Uses globals ``STACK_USER`` # setup_develop_no_requirements_update directory -function setup_develop_no_requirements_update() { +function setup_develop_no_requirements_update { local project_dir=$1 pip_install -e $project_dir @@ -1271,7 +1271,7 @@ function setup_develop_no_requirements_update() { # remove extra commas from the input string (i.e. 
``ENABLED_SERVICES``) # _cleanup_service_list service-list -function _cleanup_service_list () { +function _cleanup_service_list { echo "$1" | sed -e ' s/,,/,/g; s/^,//; @@ -1284,7 +1284,7 @@ function _cleanup_service_list () { # before a minimal installation # Uses global ``ENABLED_SERVICES`` # disable_all_services -function disable_all_services() { +function disable_all_services { ENABLED_SERVICES="" } @@ -1293,7 +1293,7 @@ function disable_all_services() { # ENABLED_SERVICES+=",-rabbit" # Uses global ``ENABLED_SERVICES`` # disable_negated_services -function disable_negated_services() { +function disable_negated_services { local tmpsvcs="${ENABLED_SERVICES}" local service for service in ${tmpsvcs//,/ }; do @@ -1314,7 +1314,7 @@ function disable_negated_services() { # for nova, glance, and neutron built into is_service_enabled(). # Uses global ``ENABLED_SERVICES`` # disable_service service [service ...] -function disable_service() { +function disable_service { local tmpsvcs=",${ENABLED_SERVICES}," local service for service in $@; do @@ -1335,7 +1335,7 @@ function disable_service() { # for nova, glance, and neutron built into is_service_enabled(). # Uses global ``ENABLED_SERVICES`` # enable_service service [service ...] -function enable_service() { +function enable_service { local tmpsvcs="${ENABLED_SERVICES}" for service in $@; do if ! is_service_enabled $service; then @@ -1369,7 +1369,7 @@ function enable_service() { # # Uses global ``ENABLED_SERVICES`` # is_service_enabled service [service ...] -function is_service_enabled() { +function is_service_enabled { local xtrace=$(set +o | grep xtrace) set +o xtrace local enabled=1 @@ -1424,7 +1424,7 @@ function use_exclusive_service { # Only run the command if the target file (the last arg) is not on an # NFS filesystem. 
-function _safe_permission_operation() { +function _safe_permission_operation { local xtrace=$(set +o | grep xtrace) set +o xtrace local args=( $@ ) @@ -1457,7 +1457,7 @@ function _safe_permission_operation() { # Exit 0 if address is in network or 1 if address is not in network # ip-range is in CIDR notation: 1.2.3.4/20 # address_in_net ip-address ip-range -function address_in_net() { +function address_in_net { local ip=$1 local range=$2 local masklen=${range#*/} @@ -1468,7 +1468,7 @@ function address_in_net() { # Add a user to a group. # add_user_to_group user group -function add_user_to_group() { +function add_user_to_group { local user=$1 local group=$2 @@ -1486,7 +1486,7 @@ function add_user_to_group() { # Convert CIDR notation to a IPv4 netmask # cidr2netmask cidr-bits -function cidr2netmask() { +function cidr2netmask { local maskpat="255 255 255 255" local maskdgt="254 252 248 240 224 192 128" set -- ${maskpat:0:$(( ($1 / 8) * 4 ))}${maskdgt:$(( (7 - ($1 % 8)) * 4 )):3} @@ -1509,7 +1509,7 @@ function cp_it { # # http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh -function export_proxy_variables() { +function export_proxy_variables { if [[ -n "$http_proxy" ]]; then export http_proxy=$http_proxy fi @@ -1522,7 +1522,7 @@ function export_proxy_variables() { } # Returns true if the directory is on a filesystem mounted via NFS. 
-function is_nfs_directory() { +function is_nfs_directory { local mount_type=`stat -f -L -c %T $1` test "$mount_type" == "nfs" } @@ -1530,7 +1530,7 @@ function is_nfs_directory() { # Return the network portion of the given IP address using netmask # netmask is in the traditional dotted-quad format # maskip ip-address netmask -function maskip() { +function maskip { local ip=$1 local mask=$2 local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}" @@ -1540,7 +1540,7 @@ function maskip() { # Service wrapper to restart services # restart_service service-name -function restart_service() { +function restart_service { if is_ubuntu; then sudo /usr/sbin/service $1 restart else @@ -1550,19 +1550,19 @@ function restart_service() { # Only change permissions of a file or directory if it is not on an # NFS filesystem. -function safe_chmod() { +function safe_chmod { _safe_permission_operation chmod $@ } # Only change ownership of a file or directory if it is not on an NFS # filesystem. -function safe_chown() { +function safe_chown { _safe_permission_operation chown $@ } # Service wrapper to start services # start_service service-name -function start_service() { +function start_service { if is_ubuntu; then sudo /usr/sbin/service $1 start else @@ -1572,7 +1572,7 @@ function start_service() { # Service wrapper to stop services # stop_service service-name -function stop_service() { +function stop_service { if is_ubuntu; then sudo /usr/sbin/service $1 stop else diff --git a/lib/apache b/lib/apache index 0e5712f56b..2d5e39a65d 100644 --- a/lib/apache +++ b/lib/apache @@ -50,7 +50,7 @@ fi # # Uses global ``APACHE_ENABLED_SERVICES`` # APACHE_ENABLED_SERVICES service [service ...] 
-function is_apache_enabled_service() { +function is_apache_enabled_service { services=$@ for service in ${services}; do [[ ,${APACHE_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 @@ -59,7 +59,7 @@ function is_apache_enabled_service() { } # install_apache_wsgi() - Install Apache server and wsgi module -function install_apache_wsgi() { +function install_apache_wsgi { # Apache installation, because we mark it NOPRIME if is_ubuntu; then # Install apache2, which is NOPRIME'd @@ -79,7 +79,7 @@ function install_apache_wsgi() { } # enable_apache_site() - Enable a particular apache site -function enable_apache_site() { +function enable_apache_site { local site=$@ if is_ubuntu; then sudo a2ensite ${site} @@ -90,7 +90,7 @@ function enable_apache_site() { } # disable_apache_site() - Disable a particular apache site -function disable_apache_site() { +function disable_apache_site { local site=$@ if is_ubuntu; then sudo a2dissite ${site} @@ -100,12 +100,12 @@ function disable_apache_site() { } # start_apache_server() - Start running apache server -function start_apache_server() { +function start_apache_server { start_service $APACHE_NAME } # stop_apache_server() - Stop running apache server -function stop_apache_server() { +function stop_apache_server { if [ -n "$APACHE_NAME" ]; then stop_service $APACHE_NAME else @@ -114,7 +114,7 @@ function stop_apache_server() { } # restart_apache_server -function restart_apache_server() { +function restart_apache_server { restart_service $APACHE_NAME } diff --git a/lib/baremetal b/lib/baremetal index d8cd7e936c..473de0dd39 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -166,7 +166,7 @@ BM_SHELL_IN_A_BOX=${BM_SHELL_IN_A_BOX:-http://shellinabox.googlecode.com/files/s # Check if baremetal is properly enabled # Returns false if VIRT_DRIVER is not baremetal, or if ENABLED_SERVICES # does not contain "baremetal" -function is_baremetal() { +function is_baremetal { if [[ "$ENABLED_SERVICES" =~ 'baremetal' && "$VIRT_DRIVER" = 'baremetal' ]]; 
then return 0 fi @@ -175,7 +175,7 @@ function is_baremetal() { # Install diskimage-builder and shell-in-a-box # so that we can build the deployment kernel & ramdisk -function prepare_baremetal_toolchain() { +function prepare_baremetal_toolchain { git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH git_clone $BM_POSEUR_REPO $BM_POSEUR_DIR $BM_POSEUR_BRANCH @@ -197,7 +197,7 @@ function prepare_baremetal_toolchain() { } # set up virtualized environment for devstack-gate testing -function create_fake_baremetal_env() { +function create_fake_baremetal_env { local bm_poseur="$BM_POSEUR_DIR/bm_poseur" # TODO(deva): add support for >1 VM sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-bridge @@ -211,14 +211,14 @@ function create_fake_baremetal_env() { BM_SECOND_MAC='12:34:56:78:90:12' } -function cleanup_fake_baremetal_env() { +function cleanup_fake_baremetal_env { local bm_poseur="$BM_POSEUR_DIR/bm_poseur" sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-vm sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-bridge } # prepare various directories needed by baremetal hypervisor -function configure_baremetal_nova_dirs() { +function configure_baremetal_nova_dirs { # ensure /tftpboot is prepared sudo mkdir -p /tftpboot sudo mkdir -p /tftpboot/pxelinux.cfg @@ -249,7 +249,7 @@ function configure_baremetal_nova_dirs() { # build deploy kernel+ramdisk, then upload them to glance # this function sets BM_DEPLOY_KERNEL_ID and BM_DEPLOY_RAMDISK_ID -function upload_baremetal_deploy() { +function upload_baremetal_deploy { token=$1 if [ "$BM_BUILD_DEPLOY_RAMDISK" = "True" ]; then @@ -281,7 +281,7 @@ function upload_baremetal_deploy() { # create a basic baremetal flavor, associated with deploy kernel & ramdisk # # Usage: create_baremetal_flavor -function create_baremetal_flavor() { +function create_baremetal_flavor { aki=$1 ari=$2 nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \ @@ -298,7 +298,7 @@ function create_baremetal_flavor() { # Sets KERNEL_ID and RAMDISK_ID # # Usage: 
extract_and_upload_k_and_r_from_image $token $file -function extract_and_upload_k_and_r_from_image() { +function extract_and_upload_k_and_r_from_image { token=$1 file=$2 image_name=$(basename "$file" ".qcow2") @@ -339,7 +339,7 @@ function extract_and_upload_k_and_r_from_image() { # Takes the same parameters, but has some peculiarities which made it # easier to create a separate method, rather than complicate the logic # of the existing function. -function upload_baremetal_image() { +function upload_baremetal_image { local image_url=$1 local token=$2 @@ -429,7 +429,7 @@ function upload_baremetal_image() { DEFAULT_IMAGE_NAME="${IMAGE_NAME%.img}" } -function clear_baremetal_of_all_nodes() { +function clear_baremetal_of_all_nodes { list=$(nova baremetal-node-list | awk -F '| ' 'NR>3 {print $2}' ) for node in $list; do nova baremetal-node-delete $node @@ -440,7 +440,7 @@ function clear_baremetal_of_all_nodes() { # Defaults to using BM_FIRST_MAC and BM_SECOND_MAC if parameters not specified # # Usage: add_baremetal_node -function add_baremetal_node() { +function add_baremetal_node { mac_1=${1:-$BM_FIRST_MAC} mac_2=${2:-$BM_SECOND_MAC} diff --git a/lib/ceilometer b/lib/ceilometer index 6c87d03b13..d20d628247 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -105,18 +105,18 @@ create_ceilometer_accounts() { # cleanup_ceilometer() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_ceilometer() { +function cleanup_ceilometer { mongo ceilometer --eval "db.dropDatabase();" } # configure_ceilometerclient() - Set config files, create data dirs, etc -function configure_ceilometerclient() { +function configure_ceilometerclient { setup_develop $CEILOMETERCLIENT_DIR sudo install -D -m 0644 -o $STACK_USER {$CEILOMETERCLIENT_DIR/tools/,/etc/bash_completion.d/}ceilometer.bash_completion } # configure_ceilometer() - Set config files, create data dirs, etc -function configure_ceilometer() { +function 
configure_ceilometer { setup_develop $CEILOMETER_DIR [ ! -d $CEILOMETER_CONF_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_CONF_DIR @@ -162,7 +162,7 @@ function configure_ceilometer() { fi } -function configure_mongodb() { +function configure_mongodb { if is_fedora; then # install mongodb client install_package mongodb @@ -174,7 +174,7 @@ function configure_mongodb() { } # init_ceilometer() - Initialize etc. -function init_ceilometer() { +function init_ceilometer { # Create cache dir sudo mkdir -p $CEILOMETER_AUTH_CACHE_DIR sudo chown $STACK_USER $CEILOMETER_AUTH_CACHE_DIR @@ -187,17 +187,17 @@ function init_ceilometer() { } # install_ceilometer() - Collect source and prepare -function install_ceilometer() { +function install_ceilometer { git_clone $CEILOMETER_REPO $CEILOMETER_DIR $CEILOMETER_BRANCH } # install_ceilometerclient() - Collect source and prepare -function install_ceilometerclient() { +function install_ceilometerclient { git_clone $CEILOMETERCLIENT_REPO $CEILOMETERCLIENT_DIR $CEILOMETERCLIENT_BRANCH } # start_ceilometer() - Start running processes, including screen -function start_ceilometer() { +function start_ceilometer { if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" fi @@ -216,7 +216,7 @@ function start_ceilometer() { } # stop_ceilometer() - Stop running processes -function stop_ceilometer() { +function stop_ceilometer { # Kill the ceilometer screen windows for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do screen_stop $serv diff --git a/lib/cinder b/lib/cinder index e8f30b683c..d003f5dc7b 100644 --- a/lib/cinder +++ b/lib/cinder @@ -102,7 +102,7 @@ function is_cinder_enabled { # _clean_lvm_lv removes all cinder LVM volumes # # Usage: _clean_lvm_lv $VOLUME_GROUP $VOLUME_NAME_PREFIX -function _clean_lvm_lv() { +function 
_clean_lvm_lv { local vg=$1 local lv_prefix=$2 @@ -119,7 +119,7 @@ function _clean_lvm_lv() { # volume group used by cinder # # Usage: _clean_lvm_backing_file() $VOLUME_GROUP -function _clean_lvm_backing_file() { +function _clean_lvm_backing_file { local vg=$1 # if there is no logical volume left, it's safe to attempt a cleanup @@ -136,7 +136,7 @@ function _clean_lvm_backing_file() { # cleanup_cinder() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_cinder() { +function cleanup_cinder { # ensure the volume group is cleared up because fails might # leave dead volumes in the group TARGETS=$(sudo tgtadm --op show --mode target) @@ -181,7 +181,7 @@ function cleanup_cinder() { } # configure_cinder_rootwrap() - configure Cinder's rootwrap -function configure_cinder_rootwrap() { +function configure_cinder_rootwrap { # Set the paths of certain binaries CINDER_ROOTWRAP=$(get_rootwrap_location cinder) @@ -212,7 +212,7 @@ function configure_cinder_rootwrap() { } # configure_cinder() - Set config files, create data dirs, etc -function configure_cinder() { +function configure_cinder { if [[ ! 
-d $CINDER_CONF_DIR ]]; then sudo mkdir -p $CINDER_CONF_DIR fi @@ -328,7 +328,7 @@ function configure_cinder() { # service cinder admin # if enabled # Migrated from keystone_data.sh -create_cinder_accounts() { +function create_cinder_accounts { SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") @@ -373,14 +373,14 @@ create_cinder_accounts() { } # create_cinder_cache_dir() - Part of the init_cinder() process -function create_cinder_cache_dir() { +function create_cinder_cache_dir { # Create cache dir sudo mkdir -p $CINDER_AUTH_CACHE_DIR sudo chown $STACK_USER $CINDER_AUTH_CACHE_DIR rm -f $CINDER_AUTH_CACHE_DIR/* } -create_cinder_volume_group() { +function create_cinder_volume_group { # According to the ``CINDER_MULTI_LVM_BACKEND`` value, configure one or two default volumes # group called ``stack-volumes`` (and ``stack-volumes2``) for the volume # service if it (they) does (do) not yet exist. If you don't wish to use a @@ -428,7 +428,7 @@ create_cinder_volume_group() { } # init_cinder() - Initialize database and volume group -function init_cinder() { +function init_cinder { # Force nova volumes off NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//") @@ -464,20 +464,20 @@ function init_cinder() { } # install_cinder() - Collect source and prepare -function install_cinder() { +function install_cinder { git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH setup_develop $CINDER_DIR } # install_cinderclient() - Collect source and prepare -function install_cinderclient() { +function install_cinderclient { git_clone $CINDERCLIENT_REPO $CINDERCLIENT_DIR $CINDERCLIENT_BRANCH setup_develop $CINDERCLIENT_DIR sudo install -D -m 0644 -o $STACK_USER {$CINDERCLIENT_DIR/tools/,/etc/bash_completion.d/}cinder.bash_completion } # apply config.d approach for cinder volumes directory -function _configure_tgt_for_config_d() { +function _configure_tgt_for_config_d { if [[ ! 
-d /etc/tgt/stack.d/ ]]; then sudo ln -sf $CINDER_STATE_PATH/volumes /etc/tgt/stack.d echo "include /etc/tgt/stack.d/*" | sudo tee -a /etc/tgt/targets.conf @@ -485,7 +485,7 @@ function _configure_tgt_for_config_d() { } # start_cinder() - Start running processes, including screen -function start_cinder() { +function start_cinder { if is_service_enabled c-vol; then # Delete any old stack.conf sudo rm -f /etc/tgt/conf.d/stack.conf @@ -529,7 +529,7 @@ function start_cinder() { } # stop_cinder() - Stop running processes -function stop_cinder() { +function stop_cinder { # Kill the cinder screen windows for serv in c-api c-bak c-sch c-vol; do screen_stop $serv diff --git a/lib/cinder_plugins/XenAPINFS b/lib/cinder_plugins/XenAPINFS index 72e1c1398c..fa10715bdf 100644 --- a/lib/cinder_plugins/XenAPINFS +++ b/lib/cinder_plugins/XenAPINFS @@ -27,7 +27,7 @@ set +o xtrace # ------------ # configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver() { +function configure_cinder_driver { iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver" iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" diff --git a/lib/cinder_plugins/glusterfs b/lib/cinder_plugins/glusterfs index a0c5ae8d5e..b4196e4738 100644 --- a/lib/cinder_plugins/glusterfs +++ b/lib/cinder_plugins/glusterfs @@ -27,7 +27,7 @@ set +o xtrace # ------------ # configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver() { +function configure_cinder_driver { # To use glusterfs, set the following in localrc: # CINDER_DRIVER=glusterfs # CINDER_GLUSTERFS_SHARES="127.0.0.1:/vol1;127.0.0.1:/vol2" diff --git a/lib/cinder_plugins/nfs b/lib/cinder_plugins/nfs index ea2c9ce665..2d9d875245 100644 --- a/lib/cinder_plugins/nfs +++ b/lib/cinder_plugins/nfs @@ -27,7 +27,7 @@ set +o xtrace # 
------------ # configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver() { +function configure_cinder_driver { iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.nfs.NfsDriver" iniset $CINDER_CONF DEFAULT nfs_shares_config "$CINDER_CONF_DIR/nfs_shares.conf" echo "$CINDER_NFS_SERVERPATH" | sudo tee "$CINDER_CONF_DIR/nfs_shares.conf" diff --git a/lib/cinder_plugins/sheepdog b/lib/cinder_plugins/sheepdog index 4435932371..30c60c6efe 100644 --- a/lib/cinder_plugins/sheepdog +++ b/lib/cinder_plugins/sheepdog @@ -27,7 +27,7 @@ set +o xtrace # ------------ # configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver() { +function configure_cinder_driver { iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" } diff --git a/lib/cinder_plugins/solidfire b/lib/cinder_plugins/solidfire index 47c113e1a2..2c970b5adf 100644 --- a/lib/cinder_plugins/solidfire +++ b/lib/cinder_plugins/solidfire @@ -27,7 +27,7 @@ set +o xtrace # ------------ # configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver() { +function configure_cinder_driver { # To use solidfire, set the following in localrc: # CINDER_DRIVER=solidfire # SAN_IP= diff --git a/lib/cinder_plugins/vsphere b/lib/cinder_plugins/vsphere index c8cab6a8c1..436b060377 100644 --- a/lib/cinder_plugins/vsphere +++ b/lib/cinder_plugins/vsphere @@ -27,7 +27,7 @@ set +o xtrace # ------------ # configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver() { +function configure_cinder_driver { iniset $CINDER_CONF DEFAULT vmware_host_ip "$VMWAREAPI_IP" iniset $CINDER_CONF DEFAULT vmware_host_username "$VMWAREAPI_USER" iniset $CINDER_CONF DEFAULT vmware_host_password "$VMWAREAPI_PASSWORD" diff --git a/lib/config b/lib/config index 1678aec3fc..552aeb0ad1 100644 --- a/lib/config +++ b/lib/config @@ -25,7 +25,7 @@ 
CONFIG_AWK_CMD=${CONFIG_AWK_CMD:-awk} # Get the section for the specific group and config file # get_meta_section infile group configfile -function get_meta_section() { +function get_meta_section { local file=$1 local matchgroup=$2 local configfile=$3 @@ -57,7 +57,7 @@ function get_meta_section() { # Get a list of config files for a specific group # get_meta_section_files infile group -function get_meta_section_files() { +function get_meta_section_files { local file=$1 local matchgroup=$2 @@ -77,7 +77,7 @@ function get_meta_section_files() { # Merge the contents of a meta-config file into its destination config file # If configfile does not exist it will be created. # merge_config_file infile group configfile -function merge_config_file() { +function merge_config_file { local file=$1 local matchgroup=$2 local configfile=$3 @@ -106,7 +106,7 @@ function merge_config_file() { # Merge all of the files specified by group # merge_config_group infile group [group ...] -function merge_config_group() { +function merge_config_group { local localfile=$1; shift local matchgroups=$@ diff --git a/lib/gantt b/lib/gantt index 832d7590df..8db2ca1406 100644 --- a/lib/gantt +++ b/lib/gantt @@ -47,42 +47,42 @@ GANTT_BIN_DIR=$(get_python_exec_prefix) # cleanup_gantt() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_gantt() { +function cleanup_gantt { echo "Cleanup Gantt" } # configure_gantt() - Set config files, create data dirs, etc -function configure_gantt() { +function configure_gantt { echo "Configure Gantt" } # init_gantt() - Initialize database and volume group -function init_gantt() { +function init_gantt { echo "Initialize Gantt" } # install_gantt() - Collect source and prepare -function install_gantt() { +function install_gantt { git_clone $GANTT_REPO $GANTT_DIR $GANTT_BRANCH setup_develop $GANTT_DIR } # install_ganttclient() - Collect source and prepare -function install_ganttclient() { +function 
install_ganttclient { echo "Install Gantt Client" # git_clone $GANTTCLIENT_REPO $GANTTCLIENT_DIR $GANTTCLIENT_BRANCH # setup_develop $GANTTCLIENT_DIR } # start_gantt() - Start running processes, including screen -function start_gantt() { +function start_gantt { if is_service_enabled gantt; then screen_it gantt "cd $GANTT_DIR && $GANTT_BIN_DIR/gantt-scheduler --config-file $GANTT_CONF" fi } # stop_gantt() - Stop running processes -function stop_gantt() { +function stop_gantt { echo "Stop Gantt" screen_stop gantt } diff --git a/lib/glance b/lib/glance index 1ebeeb3b2e..8a4c21b3f2 100644 --- a/lib/glance +++ b/lib/glance @@ -68,14 +68,14 @@ function is_glance_enabled { # cleanup_glance() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_glance() { +function cleanup_glance { # kill instances (nova) # delete image files (glance) sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $GLANCE_AUTH_CACHE_DIR } # configure_glance() - Set config files, create data dirs, etc -function configure_glance() { +function configure_glance { if [[ ! -d $GLANCE_CONF_DIR ]]; then sudo mkdir -p $GLANCE_CONF_DIR fi @@ -160,7 +160,7 @@ function configure_glance() { } # create_glance_cache_dir() - Part of the init_glance() process -function create_glance_cache_dir() { +function create_glance_cache_dir { # Create cache dir sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/api @@ -171,7 +171,7 @@ function create_glance_cache_dir() { } # init_glance() - Initialize databases, etc. 
-function init_glance() { +function init_glance { # Delete existing images rm -rf $GLANCE_IMAGE_DIR mkdir -p $GLANCE_IMAGE_DIR @@ -190,19 +190,19 @@ function init_glance() { } # install_glanceclient() - Collect source and prepare -function install_glanceclient() { +function install_glanceclient { git_clone $GLANCECLIENT_REPO $GLANCECLIENT_DIR $GLANCECLIENT_BRANCH setup_develop $GLANCECLIENT_DIR } # install_glance() - Collect source and prepare -function install_glance() { +function install_glance { git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH setup_develop $GLANCE_DIR } # start_glance() - Start running processes, including screen -function start_glance() { +function start_glance { screen_it g-reg "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." @@ -212,7 +212,7 @@ function start_glance() { } # stop_glance() - Stop running processes -function stop_glance() { +function stop_glance { # Kill the Glance screen windows screen_stop g-api screen_stop g-reg diff --git a/lib/heat b/lib/heat index 972c35ce72..d0c0302016 100644 --- a/lib/heat +++ b/lib/heat @@ -47,14 +47,14 @@ TEMPEST_SERVICES+=,heat # cleanup_heat() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_heat() { +function cleanup_heat { sudo rm -rf $HEAT_AUTH_CACHE_DIR sudo rm -rf $HEAT_ENV_DIR sudo rm -rf $HEAT_TEMPLATES_DIR } # configure_heat() - Set config files, create data dirs, etc -function configure_heat() { +function configure_heat { setup_develop $HEAT_DIR if [[ ! 
-d $HEAT_CONF_DIR ]]; then @@ -137,7 +137,7 @@ function configure_heat() { } # init_heat() - Initialize database -function init_heat() { +function init_heat { # (re)create heat database recreate_database heat utf8 @@ -147,26 +147,26 @@ function init_heat() { } # create_heat_cache_dir() - Part of the init_heat() process -function create_heat_cache_dir() { +function create_heat_cache_dir { # Create cache dirs sudo mkdir -p $HEAT_AUTH_CACHE_DIR sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR } # install_heatclient() - Collect source and prepare -function install_heatclient() { +function install_heatclient { git_clone $HEATCLIENT_REPO $HEATCLIENT_DIR $HEATCLIENT_BRANCH setup_develop $HEATCLIENT_DIR sudo install -D -m 0644 -o $STACK_USER {$HEATCLIENT_DIR/tools/,/etc/bash_completion.d/}heat.bash_completion } # install_heat() - Collect source and prepare -function install_heat() { +function install_heat { git_clone $HEAT_REPO $HEAT_DIR $HEAT_BRANCH } # start_heat() - Start running processes, including screen -function start_heat() { +function start_heat { screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF" screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-file=$HEAT_CONF" screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-file=$HEAT_CONF" @@ -174,7 +174,7 @@ function start_heat() { } # stop_heat() - Stop running processes -function stop_heat() { +function stop_heat { # Kill the screen windows for serv in h-eng h-api h-api-cfn h-api-cw; do screen_stop $serv @@ -198,7 +198,7 @@ function disk_image_create { # create_heat_accounts() - Set up common required heat accounts # Note this is in addition to what is in files/keystone_data.sh -function create_heat_accounts() { +function create_heat_accounts { # Note we have to pass token/endpoint here because the current endpoint and # version negotiation in OSC means just --os-identity-api-version=3 won't work KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" 
diff --git a/lib/horizon b/lib/horizon index 2f5795d1ca..27c2d26a01 100644 --- a/lib/horizon +++ b/lib/horizon @@ -39,7 +39,7 @@ TEMPEST_SERVICES+=,horizon # --------- # utility method of setting python option -function _horizon_config_set() { +function _horizon_config_set { local file=$1 local section=$2 local option=$3 @@ -64,7 +64,7 @@ function _horizon_config_set() { # cleanup_horizon() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_horizon() { +function cleanup_horizon { if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then # If ``/usr/bin/node`` points into ``$DEST`` # we installed it via ``install_nodejs`` @@ -75,12 +75,12 @@ function cleanup_horizon() { } # configure_horizon() - Set config files, create data dirs, etc -function configure_horizon() { +function configure_horizon { setup_develop $HORIZON_DIR } # init_horizon() - Initialize databases, etc. -function init_horizon() { +function init_horizon { # ``local_settings.py`` is used to override horizon default settings. 
local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py cp $HORIZON_SETTINGS $local_settings @@ -143,7 +143,7 @@ function init_horizon() { } # install_horizon() - Collect source and prepare -function install_horizon() { +function install_horizon { # Apache installation, because we mark it NOPRIME install_apache_wsgi @@ -151,13 +151,13 @@ function install_horizon() { } # start_horizon() - Start running processes, including screen -function start_horizon() { +function start_horizon { restart_apache_server screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log" } # stop_horizon() - Stop running processes (non-screen) -function stop_horizon() { +function stop_horizon { stop_apache_server } diff --git a/lib/infra b/lib/infra index 0dcf0ad980..7f70ff2787 100644 --- a/lib/infra +++ b/lib/infra @@ -27,7 +27,7 @@ REQUIREMENTS_DIR=$DEST/requirements # ------------ # unfubar_setuptools() - Unbreak the giant mess that is the current state of setuptools -function unfubar_setuptools() { +function unfubar_setuptools { # this is a giant game of who's on first, but it does consistently work # there is hope that upstream python packaging fixes this in the future echo_summary "Unbreaking setuptools" @@ -40,7 +40,7 @@ function unfubar_setuptools() { # install_infra() - Collect source and prepare -function install_infra() { +function install_infra { # bring down global requirements git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH diff --git a/lib/ironic b/lib/ironic index 607b13125a..177188dd06 100644 --- a/lib/ironic +++ b/lib/ironic @@ -57,25 +57,25 @@ function is_ironic_enabled { } # install_ironic() - Collect source and prepare -function install_ironic() { +function install_ironic { git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH setup_develop $IRONIC_DIR } # install_ironicclient() - Collect sources and prepare -function install_ironicclient() { +function install_ironicclient { git_clone $IRONICCLIENT_REPO 
$IRONICCLIENT_DIR $IRONICCLIENT_BRANCH setup_develop $IRONICCLIENT_DIR } # cleanup_ironic() - Remove residual data files, anything left over from previous # runs that would need to clean up. -function cleanup_ironic() { +function cleanup_ironic { sudo rm -rf $IRONIC_AUTH_CACHE_DIR } # configure_ironic() - Set config files, create data dirs, etc -function configure_ironic() { +function configure_ironic { if [[ ! -d $IRONIC_CONF_DIR ]]; then sudo mkdir -p $IRONIC_CONF_DIR fi @@ -101,7 +101,7 @@ function configure_ironic() { # configure_ironic_api() - Is used by configure_ironic(). Performs # API specific configuration. -function configure_ironic_api() { +function configure_ironic_api { iniset $IRONIC_CONF_FILE DEFAULT auth_strategy keystone iniset $IRONIC_CONF_FILE DEFAULT policy_file $IRONIC_POLICY_JSON iniset $IRONIC_CONF_FILE keystone_authtoken auth_host $KEYSTONE_AUTH_HOST @@ -120,7 +120,7 @@ function configure_ironic_api() { # configure_ironic_conductor() - Is used by configure_ironic(). # Sets conductor specific settings. 
-function configure_ironic_conductor() { +function configure_ironic_conductor { cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR @@ -128,7 +128,7 @@ function configure_ironic_conductor() { } # create_ironic_cache_dir() - Part of the init_ironic() process -function create_ironic_cache_dir() { +function create_ironic_cache_dir { # Create cache dir sudo mkdir -p $IRONIC_AUTH_CACHE_DIR/api sudo chown $STACK_USER $IRONIC_AUTH_CACHE_DIR/api @@ -143,7 +143,7 @@ function create_ironic_cache_dir() { # Tenant User Roles # ------------------------------------------------------------------ # service ironic admin # if enabled -create_ironic_accounts() { +function create_ironic_accounts { SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") @@ -178,7 +178,7 @@ create_ironic_accounts() { # init_ironic() - Initialize databases, etc. -function init_ironic() { +function init_ironic { # (Re)create ironic database recreate_database ironic utf8 @@ -192,7 +192,7 @@ function init_ironic() { } # start_ironic() - Start running processes, including screen -function start_ironic() { +function start_ironic { # Start Ironic API server, if enabled. if is_service_enabled ir-api; then start_ironic_api @@ -206,7 +206,7 @@ function start_ironic() { # start_ironic_api() - Used by start_ironic(). # Starts Ironic API server. -function start_ironic_api() { +function start_ironic_api { screen_it ir-api "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE" echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then @@ -216,13 +216,13 @@ function start_ironic_api() { # start_ironic_conductor() - Used by start_ironic(). # Starts Ironic conductor. 
-function start_ironic_conductor() { +function start_ironic_conductor { screen_it ir-cond "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-conductor --config-file=$IRONIC_CONF_FILE" # TODO(romcheg): Find a way to check whether the conductor has started. } # stop_ironic() - Stop running processes -function stop_ironic() { +function stop_ironic { # Kill the Ironic screen windows screen -S $SCREEN_NAME -p ir-api -X kill screen -S $SCREEN_NAME -p ir-cond -X kill diff --git a/lib/keystone b/lib/keystone index 73af1d356d..0548c24e87 100644 --- a/lib/keystone +++ b/lib/keystone @@ -90,7 +90,7 @@ fi # --------- # cleanup_keystone() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_keystone() { +function cleanup_keystone { # kill instances (nova) # delete image files (glance) # This function intentionally left blank @@ -98,14 +98,14 @@ function cleanup_keystone() { } # _cleanup_keystone_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file -function _cleanup_keystone_apache_wsgi() { +function _cleanup_keystone_apache_wsgi { sudo rm -f $KEYSTONE_WSGI_DIR/*.wsgi disable_apache_site keystone sudo rm -f /etc/$APACHE_NAME/$APACHE_CONF_DIR/keystone } # _config_keystone_apache_wsgi() - Set WSGI config files of Keystone -function _config_keystone_apache_wsgi() { +function _config_keystone_apache_wsgi { sudo mkdir -p $KEYSTONE_WSGI_DIR # copy proxy vhost and wsgi file @@ -125,7 +125,7 @@ function _config_keystone_apache_wsgi() { } # configure_keystone() - Set config files, create data dirs, etc -function configure_keystone() { +function configure_keystone { if [[ ! 
-d $KEYSTONE_CONF_DIR ]]; then sudo mkdir -p $KEYSTONE_CONF_DIR fi @@ -272,7 +272,7 @@ function configure_keystone() { # invisible_to_admin demo Member # Migrated from keystone_data.sh -create_keystone_accounts() { +function create_keystone_accounts { # admin ADMIN_TENANT=$(openstack project create \ @@ -346,14 +346,14 @@ create_keystone_accounts() { # Configure the API version for the OpenStack projects. # configure_API_version conf_file version -function configure_API_version() { +function configure_API_version { local conf_file=$1 local api_version=$2 iniset $conf_file keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$api_version } # init_keystone() - Initialize databases, etc. -function init_keystone() { +function init_keystone { if is_service_enabled ldap; then init_ldap fi @@ -377,14 +377,14 @@ function init_keystone() { } # install_keystoneclient() - Collect source and prepare -function install_keystoneclient() { +function install_keystoneclient { git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH setup_develop $KEYSTONECLIENT_DIR sudo install -D -m 0644 -o $STACK_USER {$KEYSTONECLIENT_DIR/tools/,/etc/bash_completion.d/}keystone.bash_completion } # install_keystone() - Collect source and prepare -function install_keystone() { +function install_keystone { # only install ldap if the service has been enabled if is_service_enabled ldap; then install_ldap @@ -408,7 +408,7 @@ function install_keystone() { } # start_keystone() - Start running processes, including screen -function start_keystone() { +function start_keystone { # Get right service port for testing local service_port=$KEYSTONE_SERVICE_PORT if is_service_enabled tls-proxy; then @@ -436,7 +436,7 @@ function start_keystone() { } # stop_keystone() - Stop running processes -function stop_keystone() { +function stop_keystone { # Kill the Keystone screen window screen_stop key } diff --git a/lib/ldap b/lib/ldap index 
e4bd41624d..51d02519af 100644 --- a/lib/ldap +++ b/lib/ldap @@ -49,7 +49,7 @@ fi # Perform common variable substitutions on the data files # _ldap_varsubst file -function _ldap_varsubst() { +function _ldap_varsubst { local infile=$1 sed -e " s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER| @@ -62,7 +62,7 @@ function _ldap_varsubst() { } # clean_ldap() - Remove ldap server -function cleanup_ldap() { +function cleanup_ldap { uninstall_package $(get_packages ldap) if is_ubuntu; then uninstall_package slapd ldap-utils libslp1 @@ -76,7 +76,7 @@ function cleanup_ldap() { # init_ldap # init_ldap() - Initialize databases, etc. -function init_ldap() { +function init_ldap { local keystone_ldif TMP_LDAP_DIR=$(mktemp -d -t ldap.$$.XXXXXXXXXX) @@ -106,7 +106,7 @@ function init_ldap() { # install_ldap # install_ldap() - Collect source and prepare -function install_ldap() { +function install_ldap { echo "Installing LDAP inside function" echo "os_VENDOR is $os_VENDOR" @@ -143,17 +143,17 @@ function install_ldap() { } # start_ldap() - Start LDAP -function start_ldap() { +function start_ldap { sudo service $LDAP_SERVICE_NAME restart } # stop_ldap() - Stop LDAP -function stop_ldap() { +function stop_ldap { sudo service $LDAP_SERVICE_NAME stop } # clear_ldap_state() - Clear LDAP State -function clear_ldap_state() { +function clear_ldap_state { ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN" } diff --git a/lib/marconi b/lib/marconi index 1c8be49291..8cfc55c1dd 100644 --- a/lib/marconi +++ b/lib/marconi @@ -73,19 +73,19 @@ function is_marconi_enabled { # cleanup_marconi() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_marconi() { +function cleanup_marconi { if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
mongo marconi --eval 'db.dropDatabase();'; do sleep 1; done"; then die $LINENO "Mongo DB did not start" fi } # configure_marconiclient() - Set config files, create data dirs, etc -function configure_marconiclient() { +function configure_marconiclient { setup_develop $MARCONICLIENT_DIR } # configure_marconi() - Set config files, create data dirs, etc -function configure_marconi() { +function configure_marconi { setup_develop $MARCONI_DIR [ ! -d $MARCONI_CONF_DIR ] && sudo mkdir -m 755 -p $MARCONI_CONF_DIR @@ -110,7 +110,7 @@ function configure_marconi() { fi } -function configure_mongodb() { +function configure_mongodb { # Set nssize to 2GB. This increases the number of namespaces supported # # per database. if is_ubuntu; then @@ -126,7 +126,7 @@ function configure_mongodb() { } # init_marconi() - Initialize etc. -function init_marconi() { +function init_marconi { # Create cache dir sudo mkdir -p $MARCONI_AUTH_CACHE_DIR sudo chown $STACK_USER $MARCONI_AUTH_CACHE_DIR @@ -134,19 +134,19 @@ function init_marconi() { } # install_marconi() - Collect source and prepare -function install_marconi() { +function install_marconi { git_clone $MARCONI_REPO $MARCONI_DIR $MARCONI_BRANCH setup_develop $MARCONI_DIR } # install_marconiclient() - Collect source and prepare -function install_marconiclient() { +function install_marconiclient { git_clone $MARCONICLIENT_REPO $MARCONICLIENT_DIR $MARCONICLIENT_BRANCH setup_develop $MARCONICLIENT_DIR } # start_marconi() - Start running processes, including screen -function start_marconi() { +function start_marconi { screen_it marconi-server "marconi-server --config-file $MARCONI_CONF" echo "Waiting for Marconi to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then @@ -155,14 +155,14 @@ function start_marconi() { } # stop_marconi() - Stop running processes -function stop_marconi() { +function stop_marconi { # Kill the marconi screen windows for serv in marconi-server; do screen -S $SCREEN_NAME -p $serv -X kill done } -function create_marconi_accounts() { +function create_marconi_accounts { SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") diff --git a/lib/neutron b/lib/neutron index df276c71d5..35575c0379 100644 --- a/lib/neutron +++ b/lib/neutron @@ -253,7 +253,7 @@ function is_neutron_enabled { # configure_neutron() # Set common config for all neutron server and agents. -function configure_neutron() { +function configure_neutron { _configure_neutron_common iniset_rpc_backend neutron $NEUTRON_CONF DEFAULT @@ -289,7 +289,7 @@ function configure_neutron() { _configure_neutron_debug_command } -function create_nova_conf_neutron() { +function create_nova_conf_neutron { iniset $NOVA_CONF DEFAULT network_api_class "nova.network.neutronv2.api.API" iniset $NOVA_CONF DEFAULT neutron_admin_username "$Q_ADMIN_USERNAME" iniset $NOVA_CONF DEFAULT neutron_admin_password "$SERVICE_PASSWORD" @@ -316,7 +316,7 @@ function create_nova_conf_neutron() { } # create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process -function create_neutron_cache_dir() { +function create_neutron_cache_dir { # Create cache dir sudo mkdir -p $NEUTRON_AUTH_CACHE_DIR sudo chown $STACK_USER $NEUTRON_AUTH_CACHE_DIR @@ -330,7 +330,7 @@ function create_neutron_cache_dir() { # service neutron admin # if enabled # Migrated from keystone_data.sh -function create_neutron_accounts() { +function create_neutron_accounts { SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") ADMIN_ROLE=$(openstack 
role list | awk "/ admin / { print \$2 }") @@ -362,7 +362,7 @@ function create_neutron_accounts() { fi } -function create_neutron_initial_network() { +function create_neutron_initial_network { TENANT_ID=$(openstack project list | grep " demo " | get_field 1) die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for demo" @@ -429,27 +429,27 @@ function create_neutron_initial_network() { } # init_neutron() - Initialize databases, etc. -function init_neutron() { +function init_neutron { recreate_database $Q_DB_NAME utf8 # Run Neutron db migrations $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head } # install_neutron() - Collect source and prepare -function install_neutron() { +function install_neutron { git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH setup_develop $NEUTRON_DIR } # install_neutronclient() - Collect source and prepare -function install_neutronclient() { +function install_neutronclient { git_clone $NEUTRONCLIENT_REPO $NEUTRONCLIENT_DIR $NEUTRONCLIENT_BRANCH setup_develop $NEUTRONCLIENT_DIR sudo install -D -m 0644 -o $STACK_USER {$NEUTRONCLIENT_DIR/tools/,/etc/bash_completion.d/}neutron.bash_completion } # install_neutron_agent_packages() - Collect source and prepare -function install_neutron_agent_packages() { +function install_neutron_agent_packages { # install packages that are specific to plugin agent(s) if is_service_enabled q-agt q-dhcp q-l3; then neutron_plugin_install_agent_packages @@ -461,7 +461,7 @@ function install_neutron_agent_packages() { } # Start running processes, including screen -function start_neutron_service_and_check() { +function start_neutron_service_and_check { # build config-file options local cfg_file local CFG_FILE_OPTIONS="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" @@ -477,7 +477,7 @@ function start_neutron_service_and_check() { } # Start running processes, including screen -function start_neutron_agents() { +function 
start_neutron_agents { # Start up the neutron agents if enabled screen_it q-agt "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" screen_it q-dhcp "cd $NEUTRON_DIR && python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE" @@ -510,7 +510,7 @@ function start_neutron_agents() { } # stop_neutron() - Stop running processes (non-screen) -function stop_neutron() { +function stop_neutron { if is_service_enabled q-dhcp; then pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') [ ! -z "$pid" ] && sudo kill -9 $pid @@ -535,7 +535,7 @@ function stop_neutron() { # cleanup_neutron() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_neutron() { +function cleanup_neutron { if is_neutron_ovs_base_plugin; then neutron_ovs_base_cleanup fi @@ -549,7 +549,7 @@ function cleanup_neutron() { # _configure_neutron_common() # Set common config for all neutron server and agents. # This MUST be called before other ``_configure_neutron_*`` functions. -function _configure_neutron_common() { +function _configure_neutron_common { # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find if [[ ! 
-d $NEUTRON_CONF_DIR ]]; then sudo mkdir -p $NEUTRON_CONF_DIR @@ -611,7 +611,7 @@ function _configure_neutron_common() { _neutron_setup_rootwrap } -function _configure_neutron_debug_command() { +function _configure_neutron_debug_command { if [[ "$Q_USE_DEBUG_COMMAND" != "True" ]]; then return fi @@ -628,7 +628,7 @@ function _configure_neutron_debug_command() { neutron_plugin_configure_debug_command } -function _configure_neutron_dhcp_agent() { +function _configure_neutron_dhcp_agent { AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini @@ -652,7 +652,7 @@ function _configure_neutron_dhcp_agent() { neutron_plugin_configure_dhcp_agent } -function _configure_neutron_l3_agent() { +function _configure_neutron_l3_agent { Q_L3_ENABLED=True # for l3-agent, only use per tenant router if we have namespaces Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE @@ -676,7 +676,7 @@ function _configure_neutron_l3_agent() { neutron_plugin_configure_l3_agent } -function _configure_neutron_metadata_agent() { +function _configure_neutron_metadata_agent { AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini @@ -691,30 +691,29 @@ function _configure_neutron_metadata_agent() { } -function _configure_neutron_lbaas() { +function _configure_neutron_lbaas { neutron_agent_lbaas_configure_common neutron_agent_lbaas_configure_agent } -function _configure_neutron_metering() { +function _configure_neutron_metering { neutron_agent_metering_configure_common neutron_agent_metering_configure_agent } -function _configure_neutron_fwaas() { +function _configure_neutron_fwaas { neutron_fwaas_configure_common neutron_fwaas_configure_driver } -function _configure_neutron_vpn() -{ +function _configure_neutron_vpn { neutron_vpn_install_agent_packages neutron_vpn_configure_common } # _configure_neutron_plugin_agent() - Set config files for neutron plugin agent # It is called when q-agt is enabled. 
-function _configure_neutron_plugin_agent() { +function _configure_neutron_plugin_agent { # Specify the default root helper prior to agent configuration to # ensure that an agent's configuration can override the default iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND" @@ -727,7 +726,7 @@ function _configure_neutron_plugin_agent() { # _configure_neutron_service() - Set config files for neutron service # It is called when q-svc is enabled. -function _configure_neutron_service() { +function _configure_neutron_service { Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json @@ -765,7 +764,7 @@ function _configure_neutron_service() { #------------------ # _neutron_service_plugin_class_add() - add service plugin class -function _neutron_service_plugin_class_add() { +function _neutron_service_plugin_class_add { local service_plugin_class=$1 if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class @@ -775,7 +774,7 @@ function _neutron_service_plugin_class_add() { } # _neutron_setup_rootwrap() - configure Neutron's rootwrap -function _neutron_setup_rootwrap() { +function _neutron_setup_rootwrap { if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then return fi @@ -815,7 +814,7 @@ function _neutron_setup_rootwrap() { } # Configures keystone integration for neutron service and agents -function _neutron_setup_keystone() { +function _neutron_setup_keystone { local conf_file=$1 local section=$2 local use_auth_url=$3 @@ -842,7 +841,7 @@ function _neutron_setup_keystone() { fi } -function _neutron_setup_interface_driver() { +function _neutron_setup_interface_driver { # ovs_use_veth needs to be set before the plugin configuration # occurs to allow plugins to override the setting. 
@@ -854,14 +853,14 @@ function _neutron_setup_interface_driver() { # Functions for Neutron Exercises #-------------------------------- -function delete_probe() { +function delete_probe { local from_net="$1" net_id=`_get_net_id $from_net` probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id } -function setup_neutron_debug() { +function setup_neutron_debug { if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME` neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $public_net_id @@ -870,23 +869,23 @@ function setup_neutron_debug() { fi } -function teardown_neutron_debug() { +function teardown_neutron_debug { delete_probe $PUBLIC_NETWORK_NAME delete_probe $PRIVATE_NETWORK_NAME } -function _get_net_id() { +function _get_net_id { neutron --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $1 | awk '{print $2}' } -function _get_probe_cmd_prefix() { +function _get_probe_cmd_prefix { local from_net="$1" net_id=`_get_net_id $from_net` probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" } -function _ping_check_neutron() { +function _ping_check_neutron { local from_net=$1 local ip=$2 local timeout_sec=$3 @@ -908,7 +907,7 @@ function _ping_check_neutron() { } # ssh check -function _ssh_check_neutron() { +function _ssh_check_neutron { local from_net=$1 local key_file=$2 local ip=$3 @@ -934,39 +933,39 @@ for f in $TOP_DIR/lib/neutron_thirdparty/*; do fi done -function _neutron_third_party_do() { +function _neutron_third_party_do { for third_party in 
${NEUTRON_THIRD_PARTIES//,/ }; do ${1}_${third_party} done } # configure_neutron_third_party() - Set config files, create data dirs, etc -function configure_neutron_third_party() { +function configure_neutron_third_party { _neutron_third_party_do configure } # init_neutron_third_party() - Initialize databases, etc. -function init_neutron_third_party() { +function init_neutron_third_party { _neutron_third_party_do init } # install_neutron_third_party() - Collect source and prepare -function install_neutron_third_party() { +function install_neutron_third_party { _neutron_third_party_do install } # start_neutron_third_party() - Start running processes, including screen -function start_neutron_third_party() { +function start_neutron_third_party { _neutron_third_party_do start } # stop_neutron_third_party - Stop running processes (non-screen) -function stop_neutron_third_party() { +function stop_neutron_third_party { _neutron_third_party_do stop } # check_neutron_third_party_integration() - Check that third party integration is sane -function check_neutron_third_party_integration() { +function check_neutron_third_party_integration { _neutron_third_party_do check } diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index 1e4aa00121..4cb0da84ea 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -8,15 +8,15 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base source $TOP_DIR/lib/neutron_thirdparty/bigswitch_floodlight # for third party service specific configuration values -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { : } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { _neutron_ovs_base_install_agent_packages } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/bigswitch 
Q_PLUGIN_CONF_FILENAME=restproxy.ini Q_DB_NAME="restproxy_neutron" @@ -25,23 +25,23 @@ function neutron_plugin_configure_common() { BS_FL_CONTROLLER_TIMEOUT=${BS_FL_CONTROLLER_TIMEOUT:-10} } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { _neutron_ovs_base_configure_debug_command } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { : } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { _neutron_ovs_base_configure_l3_agent } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { : } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { iniset /$Q_PLUGIN_CONF_FILE restproxy servers $BS_FL_CONTROLLERS_PORT iniset /$Q_PLUGIN_CONF_FILE restproxy servertimeout $BS_FL_CONTROLLER_TIMEOUT if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then @@ -49,7 +49,7 @@ function neutron_plugin_configure_service() { fi } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.IVSInterfaceDriver @@ -59,12 +59,12 @@ function neutron_plugin_setup_interface_driver() { } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 1 means False here return 1 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade index 8e18d04984..4443fa7823 100644 --- a/lib/neutron_plugins/brocade +++ b/lib/neutron_plugins/brocade @@ -5,53 +5,53 @@ BRCD_XTRACE=$(set +o | grep xtrace) set +o xtrace -function is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { return 1 } 
-function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { install_package bridge-utils } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/brocade Q_PLUGIN_CONF_FILENAME=brocade.ini Q_DB_NAME="brcd_neutron" Q_PLUGIN_CLASS="neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2" } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent" } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 0 means True here return 0 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco index 8948be6de4..7728eb177f 100644 --- 
a/lib/neutron_plugins/cisco +++ b/lib/neutron_plugins/cisco @@ -27,12 +27,12 @@ NCCLIENT_REPO=${NCCLIENT_REPO:-${GIT_BASE}/CiscoSystems/ncclient.git} NCCLIENT_BRANCH=${NCCLIENT_BRANCH:-master} # This routine put a prefix on an existing function name -function _prefix_function() { +function _prefix_function { declare -F $1 > /dev/null || die "$1 doesn't exist" eval "$(echo "${2}_${1}()"; declare -f ${1} | tail -n +2)" } -function _has_ovs_subplugin() { +function _has_ovs_subplugin { local subplugin for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do if [[ "$subplugin" == "openvswitch" ]]; then @@ -42,7 +42,7 @@ function _has_ovs_subplugin() { return 1 } -function _has_nexus_subplugin() { +function _has_nexus_subplugin { local subplugin for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do if [[ "$subplugin" == "nexus" ]]; then @@ -52,7 +52,7 @@ function _has_nexus_subplugin() { return 1 } -function _has_n1kv_subplugin() { +function _has_n1kv_subplugin { local subplugin for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do if [[ "$subplugin" == "n1kv" ]]; then @@ -64,7 +64,7 @@ function _has_n1kv_subplugin() { # This routine populates the cisco config file with the information for # a particular nexus switch -function _config_switch() { +function _config_switch { local cisco_cfg_file=$1 local switch_ip=$2 local username=$3 @@ -99,7 +99,7 @@ _prefix_function neutron_plugin_setup_interface_driver ovs _prefix_function has_neutron_plugin_security_group ovs # Check the version of the installed ncclient package -function check_ncclient_version() { +function check_ncclient_version { python << EOF version = '$NCCLIENT_VERSION' import sys @@ -115,13 +115,13 @@ EOF } # Install the ncclient package -function install_ncclient() { +function install_ncclient { git_clone $NCCLIENT_REPO $NCCLIENT_DIR $NCCLIENT_BRANCH (cd $NCCLIENT_DIR; sudo python setup.py install) } # Check if the required version of ncclient has been installed -function is_ncclient_installed() { +function 
is_ncclient_installed { # Check if the Cisco ncclient repository exists if [[ -d $NCCLIENT_DIR ]]; then remotes=$(cd $NCCLIENT_DIR; git remote -v | grep fetch | awk '{ print $2}') @@ -144,7 +144,7 @@ function is_ncclient_installed() { return 0 } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { if _has_ovs_subplugin; then ovs_has_neutron_plugin_security_group else @@ -152,14 +152,14 @@ function has_neutron_plugin_security_group() { fi } -function is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { # Cisco uses OVS if openvswitch subplugin is deployed _has_ovs_subplugin return } # populate required nova configuration parameters -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { if _has_ovs_subplugin; then ovs_neutron_plugin_create_nova_conf else @@ -167,13 +167,13 @@ function neutron_plugin_create_nova_conf() { fi } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { # Cisco plugin uses openvswitch to operate in one of its configurations ovs_neutron_plugin_install_agent_packages } # Configure common parameters -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { # setup default subplugins if [ ! 
-v Q_CISCO_PLUGIN_SUBPLUGINS ]; then declare -ga Q_CISCO_PLUGIN_SUBPLUGINS @@ -191,23 +191,23 @@ function neutron_plugin_configure_common() { Q_DB_NAME=cisco_neutron } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { if _has_ovs_subplugin; then ovs_neutron_plugin_configure_debug_command fi } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { if _has_ovs_subplugin; then ovs_neutron_plugin_configure_l3_agent fi } -function _configure_nexus_subplugin() { +function _configure_nexus_subplugin { local cisco_cfg_file=$1 # Install a known compatible ncclient from the Cisco repository if necessary @@ -252,7 +252,7 @@ function _configure_nexus_subplugin() { } # Configure n1kv plugin -function _configure_n1kv_subplugin() { +function _configure_n1kv_subplugin { local cisco_cfg_file=$1 # populate the cisco plugin cfg file with the VSM information @@ -270,13 +270,13 @@ function _configure_n1kv_subplugin() { _neutron_ovs_base_setup_bridge $OVS_BRIDGE } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { if _has_ovs_subplugin; then ovs_neutron_plugin_configure_plugin_agent fi } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { local subplugin local cisco_cfg_file @@ -318,7 +318,7 @@ function neutron_plugin_configure_service() { fi } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane index 325e9397e6..62f9737e51 100644 --- a/lib/neutron_plugins/embrane +++ 
b/lib/neutron_plugins/embrane @@ -7,7 +7,7 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/openvswitch -save_function() { +function save_function { local ORIG_FUNC=$(declare -f $1) local NEW_FUNC="$2${ORIG_FUNC#$1}" eval "$NEW_FUNC" @@ -15,14 +15,14 @@ save_function() { save_function neutron_plugin_configure_service _neutron_plugin_configure_service -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/embrane Q_PLUGIN_CONF_FILENAME=heleos_conf.ini Q_DB_NAME="ovs_neutron" Q_PLUGIN_CLASS="neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin" } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { _neutron_plugin_configure_service iniset /$Q_PLUGIN_CONF_FILE heleos esm_mgmt $HELEOS_ESM_MGMT iniset /$Q_PLUGIN_CONF_FILE heleos admin_username $HELEOS_ADMIN_USERNAME diff --git a/lib/neutron_plugins/linuxbridge b/lib/neutron_plugins/linuxbridge index 37bc748c37..362fd5b39e 100644 --- a/lib/neutron_plugins/linuxbridge +++ b/lib/neutron_plugins/linuxbridge @@ -7,14 +7,14 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/linuxbridge_agent -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/linuxbridge Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini Q_DB_NAME="neutron_linux_bridge" Q_PLUGIN_CLASS="neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2" } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE vlans tenant_network_type vlan else @@ -47,7 +47,7 @@ function neutron_plugin_configure_service() { done } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 0 means True here return 0 } diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index 
85e8c085be..74799e477c 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -5,33 +5,33 @@ PLUGIN_XTRACE=$(set +o | grep xtrace) set +o xtrace -function is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { # linuxbridge doesn't use OVS return 1 } -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { : } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { install_package bridge-utils } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { # Setup physical network interface mappings. Override # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more # complex physical network configurations. 
@@ -63,12 +63,12 @@ function neutron_plugin_configure_plugin_agent() { done } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index dd3b2baeca..742e3b2f0f 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -9,32 +9,32 @@ MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-ap MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -function is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { # MidoNet does not use l3-agent # 0 means True here return 1 } -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { : } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/midonet Q_PLUGIN_CONF_FILENAME=midonet.ini Q_DB_NAME="neutron_midonet" Q_PLUGIN_CLASS="neutron.plugins.midonet.plugin.MidonetPluginV2" } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { : } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { DHCP_DRIVER=${DHCP_DRIVER:-"neutron.plugins.midonet.agent.midonet_driver.DhcpNoOpDriver"} neutron_plugin_setup_interface_driver $Q_DHCP_CONF_FILE iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_driver $DHCP_DRIVER @@ -42,15 +42,15 @@ function neutron_plugin_configure_dhcp_agent() { iniset $Q_DHCP_CONF_FILE 
DEFAULT enable_isolated_metadata True } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { die $LINENO "q-l3 must not be executed with MidoNet plugin!" } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { die $LINENO "q-agt must not be executed with MidoNet plugin!" } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { if [[ "$MIDONET_API_URL" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE MIDONET midonet_uri $MIDONET_API_URL fi @@ -68,17 +68,17 @@ function neutron_plugin_configure_service() { Q_L3_ROUTER_PER_TENANT=True } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.MidonetInterfaceDriver } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 0 means True here return 0 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { # 0 means True here return 1 } diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 4ceabe765d..e985dcb4a5 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -33,7 +33,7 @@ Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS:-} # L3 Plugin to load for ML2 ML2_L3_PLUGIN=${ML2_L3_PLUGIN:-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin} -function populate_ml2_config() { +function populate_ml2_config { CONF=$1 SECTION=$2 OPTS=$3 @@ -47,7 +47,7 @@ function populate_ml2_config() { done } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ml2 Q_PLUGIN_CONF_FILENAME=ml2_conf.ini Q_DB_NAME="neutron_ml2" @@ -57,7 +57,7 @@ function neutron_plugin_configure_common() { _neutron_service_plugin_class_add $ML2_L3_PLUGIN } -function 
neutron_plugin_configure_service() { +function neutron_plugin_configure_service { if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then Q_SRV_EXTRA_OPTS+=(tenant_network_types=$Q_ML2_TENANT_NETWORK_TYPE) elif [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then @@ -114,7 +114,7 @@ function neutron_plugin_configure_service() { populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vlan $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { return 0 } diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec index 1cb2fef533..6d4bfca244 100644 --- a/lib/neutron_plugins/nec +++ b/lib/neutron_plugins/nec @@ -22,11 +22,11 @@ OFC_RETRY_INTERVAL=${OFC_RETRY_INTERVAL:-1} source $TOP_DIR/lib/neutron_plugins/ovs_base -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { _neutron_ovs_base_configure_nova_vif_driver } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { # SKIP_OVS_INSTALL is useful when we want to use Open vSwitch whose # version is different from the version provided by the distribution. 
if [[ "$SKIP_OVS_INSTALL" = "True" ]]; then @@ -36,26 +36,26 @@ function neutron_plugin_install_agent_packages() { _neutron_ovs_base_install_agent_packages } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nec Q_PLUGIN_CONF_FILENAME=nec.ini Q_DB_NAME="neutron_nec" Q_PLUGIN_CLASS="neutron.plugins.nec.nec_plugin.NECPluginV2" } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { _neutron_ovs_base_configure_debug_command } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { : } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { _neutron_ovs_base_configure_l3_agent } -function _quantum_plugin_setup_bridge() { +function _quantum_plugin_setup_bridge { if [[ "$SKIP_OVS_BRIDGE_SETUP" = "True" ]]; then return fi @@ -72,7 +72,7 @@ function _quantum_plugin_setup_bridge() { _neutron_setup_ovs_tunnels $OVS_BRIDGE } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { _quantum_plugin_setup_bridge AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nec-agent" @@ -80,7 +80,7 @@ function neutron_plugin_configure_plugin_agent() { _neutron_ovs_base_configure_firewall_driver } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { iniset $NEUTRON_CONF DEFAULT api_extensions_path neutron/plugins/nec/extensions/ iniset /$Q_PLUGIN_CONF_FILE ofc host $OFC_API_HOST iniset /$Q_PLUGIN_CONF_FILE ofc port $OFC_API_PORT @@ -91,7 +91,7 @@ function neutron_plugin_configure_service() { _neutron_ovs_base_configure_firewall_driver } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver iniset $conf_file DEFAULT ovs_use_veth True @@ -101,7 +101,7 @@ 
function neutron_plugin_setup_interface_driver() { # --------------------------- # Setup OVS tunnel manually -function _neutron_setup_ovs_tunnels() { +function _neutron_setup_ovs_tunnels { local bridge=$1 local id=0 GRE_LOCAL_IP=${GRE_LOCAL_IP:-$HOST_IP} @@ -117,12 +117,12 @@ function _neutron_setup_ovs_tunnels() { fi } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 0 means True here return 0 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_plugins/openvswitch b/lib/neutron_plugins/openvswitch index f99eb383d8..bdbc5a9367 100644 --- a/lib/neutron_plugins/openvswitch +++ b/lib/neutron_plugins/openvswitch @@ -7,14 +7,14 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/openvswitch_agent -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/openvswitch Q_PLUGIN_CONF_FILENAME=ovs_neutron_plugin.ini Q_DB_NAME="ovs_neutron" Q_PLUGIN_CLASS="neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2" } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE ovs tenant_network_type gre iniset /$Q_PLUGIN_CONF_FILE ovs tunnel_id_ranges $TENANT_TUNNEL_RANGES @@ -52,7 +52,7 @@ function neutron_plugin_configure_service() { done } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { return 0 } diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index 46c2a5c6e2..3a2bdc316a 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -7,7 +7,7 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base -function neutron_plugin_create_nova_conf() { +function 
neutron_plugin_create_nova_conf { _neutron_ovs_base_configure_nova_vif_driver if [ "$VIRT_DRIVER" = 'xenserver' ]; then iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver @@ -17,24 +17,24 @@ function neutron_plugin_create_nova_conf() { fi } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { _neutron_ovs_base_install_agent_packages } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { _neutron_ovs_base_configure_debug_command } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { _neutron_ovs_base_configure_l3_agent iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { # Setup integration bridge _neutron_ovs_base_setup_bridge $OVS_BRIDGE _neutron_ovs_base_configure_firewall_driver @@ -118,12 +118,12 @@ function neutron_plugin_configure_plugin_agent() { done } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 89db29d07f..0a2ba58fbb 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -8,19 +8,19 @@ set +o xtrace OVS_BRIDGE=${OVS_BRIDGE:-br-int} PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} -function 
is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { # Yes, we use OVS. return 0 } -function _neutron_ovs_base_setup_bridge() { +function _neutron_ovs_base_setup_bridge { local bridge=$1 neutron-ovs-cleanup sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge } -function neutron_ovs_base_cleanup() { +function neutron_ovs_base_cleanup { # remove all OVS ports that look like Neutron created ports for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do sudo ovs-vsctl del-port ${port} @@ -32,7 +32,7 @@ function neutron_ovs_base_cleanup() { done } -function _neutron_ovs_base_install_agent_packages() { +function _neutron_ovs_base_install_agent_packages { local kernel_version # Install deps # FIXME add to ``files/apts/neutron``, but don't install if not needed! @@ -50,11 +50,11 @@ function _neutron_ovs_base_install_agent_packages() { fi } -function _neutron_ovs_base_configure_debug_command() { +function _neutron_ovs_base_configure_debug_command { iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE } -function _neutron_ovs_base_configure_firewall_driver() { +function _neutron_ovs_base_configure_firewall_driver { if [[ "$Q_USE_SECGROUP" == "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver else @@ -62,7 +62,7 @@ function _neutron_ovs_base_configure_firewall_driver() { fi } -function _neutron_ovs_base_configure_l3_agent() { +function _neutron_ovs_base_configure_l3_agent { iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE neutron-ovs-cleanup @@ -72,7 +72,7 @@ function _neutron_ovs_base_configure_l3_agent() { sudo ip addr flush dev $PUBLIC_BRIDGE } -function _neutron_ovs_base_configure_nova_vif_driver() { +function _neutron_ovs_base_configure_nova_vif_driver { : } diff --git a/lib/neutron_plugins/plumgrid 
b/lib/neutron_plugins/plumgrid index bccd301011..19f94cb78c 100644 --- a/lib/neutron_plugins/plumgrid +++ b/lib/neutron_plugins/plumgrid @@ -6,15 +6,15 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { : } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { : } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/plumgrid Q_PLUGIN_CONF_FILENAME=plumgrid.ini Q_DB_NAME="plumgrid_neutron" @@ -26,7 +26,7 @@ function neutron_plugin_configure_common() { PLUMGRID_TIMEOUT=${PLUMGRID_TIMEOUT:-70} } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server $PLUMGRID_DIRECTOR_IP iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server_port $PLUMGRID_DIRECTOR_PORT iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector username $PLUMGRID_ADMIN @@ -34,21 +34,21 @@ function neutron_plugin_configure_service() { iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector servertimeout $PLUMGRID_TIMEOUT } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { : } -function is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { # False return 1 } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # False return 1 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/ryu b/lib/neutron_plugins/ryu index 334c227cdb..9ae36d38fa 100644 --- a/lib/neutron_plugins/ryu +++ b/lib/neutron_plugins/ryu @@ -8,12 +8,12 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base source $TOP_DIR/lib/neutron_thirdparty/ryu # for 
configuration value -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { _neutron_ovs_base_configure_nova_vif_driver iniset $NOVA_CONF DEFAULT libvirt_ovs_integration_bridge "$OVS_BRIDGE" } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { _neutron_ovs_base_install_agent_packages # neutron_ryu_agent requires ryu module @@ -22,28 +22,28 @@ function neutron_plugin_install_agent_packages() { configure_ryu } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ryu Q_PLUGIN_CONF_FILENAME=ryu.ini Q_DB_NAME="ovs_neutron" Q_PLUGIN_CLASS="neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2" } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { _neutron_ovs_base_configure_debug_command iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT _neutron_ovs_base_configure_l3_agent } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { # Set up integration bridge _neutron_ovs_base_setup_bridge $OVS_BRIDGE if [ -n "$RYU_INTERNAL_INTERFACE" ]; then @@ -55,24 +55,24 @@ function neutron_plugin_configure_plugin_agent() { _neutron_ovs_base_configure_firewall_driver } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { iniset /$Q_PLUGIN_CONF_FILE ovs openflow_rest_api $RYU_API_HOST:$RYU_API_PORT _neutron_ovs_base_configure_firewall_driver } -function neutron_plugin_setup_interface_driver() { +function 
neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver iniset $conf_file DEFAULT ovs_use_veth True } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 0 means True here return 0 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall index 8273e54e6c..ab6c32426a 100644 --- a/lib/neutron_plugins/services/firewall +++ b/lib/neutron_plugins/services/firewall @@ -7,11 +7,11 @@ set +o xtrace FWAAS_PLUGIN=neutron.services.firewall.fwaas_plugin.FirewallPlugin -function neutron_fwaas_configure_common() { +function neutron_fwaas_configure_common { _neutron_service_plugin_class_add $FWAAS_PLUGIN } -function neutron_fwaas_configure_driver() { +function neutron_fwaas_configure_driver { FWAAS_DRIVER_CONF_FILENAME=/etc/neutron/fwaas_driver.ini cp $NEUTRON_DIR/etc/fwaas_driver.ini $FWAAS_DRIVER_CONF_FILENAME @@ -19,7 +19,7 @@ function neutron_fwaas_configure_driver() { iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas driver "neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver" } -function neutron_fwaas_stop() { +function neutron_fwaas_stop { : } diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer index 5d7a94e5d8..744826e49d 100644 --- a/lib/neutron_plugins/services/loadbalancer +++ b/lib/neutron_plugins/services/loadbalancer @@ -9,7 +9,7 @@ set +o xtrace AGENT_LBAAS_BINARY="$NEUTRON_BIN_DIR/neutron-lbaas-agent" LBAAS_PLUGIN=neutron.services.loadbalancer.plugin.LoadBalancerPlugin -function neutron_agent_lbaas_install_agent_packages() { +function neutron_agent_lbaas_install_agent_packages { if is_ubuntu || is_fedora; then install_package haproxy elif 
is_suse; then @@ -18,11 +18,11 @@ function neutron_agent_lbaas_install_agent_packages() { fi } -function neutron_agent_lbaas_configure_common() { +function neutron_agent_lbaas_configure_common { _neutron_service_plugin_class_add $LBAAS_PLUGIN } -function neutron_agent_lbaas_configure_agent() { +function neutron_agent_lbaas_configure_agent { LBAAS_AGENT_CONF_PATH=/etc/neutron/services/loadbalancer/haproxy mkdir -p $LBAAS_AGENT_CONF_PATH @@ -41,7 +41,7 @@ function neutron_agent_lbaas_configure_agent() { fi } -function neutron_lbaas_stop() { +function neutron_lbaas_stop { pids=$(ps aux | awk '/haproxy/ { print $2 }') [ ! -z "$pids" ] && sudo kill $pids } diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering index 37952bbabd..0e5f75b27b 100644 --- a/lib/neutron_plugins/services/metering +++ b/lib/neutron_plugins/services/metering @@ -9,11 +9,11 @@ set +o xtrace AGENT_METERING_BINARY="$NEUTRON_BIN_DIR/neutron-metering-agent" METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin" -function neutron_agent_metering_configure_common() { +function neutron_agent_metering_configure_common { _neutron_service_plugin_class_add $METERING_PLUGIN } -function neutron_agent_metering_configure_agent() { +function neutron_agent_metering_configure_agent { METERING_AGENT_CONF_PATH=/etc/neutron/services/metering mkdir -p $METERING_AGENT_CONF_PATH @@ -22,7 +22,7 @@ function neutron_agent_metering_configure_agent() { cp $NEUTRON_DIR/etc/metering_agent.ini $METERING_AGENT_CONF_FILENAME } -function neutron_metering_stop() { +function neutron_metering_stop { : } diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn index 02370e7f85..e56d3613c2 100644 --- a/lib/neutron_plugins/services/vpn +++ b/lib/neutron_plugins/services/vpn @@ -10,15 +10,15 @@ AGENT_VPN_BINARY="$NEUTRON_BIN_DIR/neutron-vpn-agent" VPN_PLUGIN="neutron.services.vpn.plugin.VPNDriverPlugin" IPSEC_PACKAGE=${IPSEC_PACKAGE:-"openswan"} -function 
neutron_vpn_install_agent_packages() { +function neutron_vpn_install_agent_packages { install_package $IPSEC_PACKAGE } -function neutron_vpn_configure_common() { +function neutron_vpn_configure_common { _neutron_service_plugin_class_add $VPN_PLUGIN } -function neutron_vpn_stop() { +function neutron_vpn_stop { local ipsec_data_dir=$DATA_DIR/neutron/ipsec local pids if [ -d $ipsec_data_dir ]; then diff --git a/lib/neutron_plugins/vmware_nsx b/lib/neutron_plugins/vmware_nsx index d506cb6f8d..0930422e4e 100644 --- a/lib/neutron_plugins/vmware_nsx +++ b/lib/neutron_plugins/vmware_nsx @@ -7,7 +7,7 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base -function setup_integration_bridge() { +function setup_integration_bridge { _neutron_ovs_base_setup_bridge $OVS_BRIDGE # Set manager to NSX controller (1st of list) if [[ "$NSX_CONTROLLERS" != "" ]]; then @@ -20,24 +20,24 @@ function setup_integration_bridge() { sudo ovs-vsctl set-manager ssl:$OVS_MGR_IP } -function is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { # NSX uses OVS, but not the l3-agent return 0 } -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { # if n-cpu is enabled, then setup integration bridge if is_service_enabled n-cpu; then setup_integration_bridge fi } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { # VMware NSX Plugin does not run q-agt, but it currently needs dhcp and metadata agents _neutron_ovs_base_install_agent_packages } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware Q_PLUGIN_CONF_FILENAME=nsx.ini Q_DB_NAME="neutron_nsx" @@ -45,29 +45,29 @@ function neutron_plugin_configure_common() { Q_PLUGIN_CLASS="neutron.plugins.nicira.NeutronPlugin.NvpPluginV2" } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { sudo ovs-vsctl --no-wait -- 
--may-exist add-br $PUBLIC_BRIDGE iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge "$PUBLIC_BRIDGE" } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { setup_integration_bridge iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network True iniset $Q_DHCP_CONF_FILE DEFAULT ovs_use_veth True } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { # VMware NSX plugin does not run L3 agent die $LINENO "q-l3 should must not be executed with VMware NSX plugin!" } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { # VMware NSX plugin does not run L2 agent die $LINENO "q-agt must not be executed with VMware NSX plugin!" } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { if [[ "$MAX_LP_PER_BRIDGED_LS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS fi @@ -132,17 +132,17 @@ function neutron_plugin_configure_service() { fi } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 0 means True here return 0 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight index 24c10443b7..f03de56295 100644 --- a/lib/neutron_thirdparty/bigswitch_floodlight +++ b/lib/neutron_thirdparty/bigswitch_floodlight @@ -8,11 +8,11 @@ set +o xtrace BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} BS_FL_OF_PORT=${BS_FL_OF_PORT:-6633} 
-function configure_bigswitch_floodlight() { +function configure_bigswitch_floodlight { : } -function init_bigswitch_floodlight() { +function init_bigswitch_floodlight { install_neutron_agent_packages echo -n "Installing OVS managed by the openflow controllers:" @@ -32,19 +32,19 @@ function init_bigswitch_floodlight() { sudo ovs-vsctl --no-wait set-controller ${OVS_BRIDGE} ${ctrls} } -function install_bigswitch_floodlight() { +function install_bigswitch_floodlight { : } -function start_bigswitch_floodlight() { +function start_bigswitch_floodlight { : } -function stop_bigswitch_floodlight() { +function stop_bigswitch_floodlight { : } -function check_bigswitch_floodlight() { +function check_bigswitch_floodlight { : } diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet index 98be4254fc..ad417bbc29 100644 --- a/lib/neutron_thirdparty/midonet +++ b/lib/neutron_thirdparty/midonet @@ -20,28 +20,28 @@ MIDONET_CLIENT_DIR=${MIDONET_CLIENT_DIR:-$MIDONET_DIR/python-midonetclient} MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -function configure_midonet() { +function configure_midonet { : } -function init_midonet() { +function init_midonet { : } -function install_midonet() { +function install_midonet { git_clone $MIDONET_CLIENT_REPO $MIDONET_CLIENT_DIR $MIDONET_CLIENT_BRANCH export PYTHONPATH=$MIDONET_CLIENT_DIR/src:$PYTHONPATH } -function start_midonet() { +function start_midonet { : } -function stop_midonet() { +function stop_midonet { : } -function check_midonet() { +function check_midonet { : } diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu index 5edf273361..424a90041e 100644 --- a/lib/neutron_thirdparty/ryu +++ b/lib/neutron_thirdparty/ryu @@ -21,14 +21,14 @@ RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest} # configure_ryu can be called multiple times as neutron_pluing/ryu may call # this function for neutron-ryu-agent _RYU_CONFIGURED=${_RYU_CONFIGURED:-False} -function configure_ryu() { +function configure_ryu { 
if [[ "$_RYU_CONFIGURED" == "False" ]]; then setup_develop $RYU_DIR _RYU_CONFIGURED=True fi } -function init_ryu() { +function init_ryu { RYU_CONF_DIR=/etc/ryu if [[ ! -d $RYU_CONF_DIR ]]; then sudo mkdir -p $RYU_CONF_DIR @@ -60,22 +60,22 @@ neutron_controller_addr=tcp:$RYU_OFP_HOST:$RYU_OFP_PORT # Make this function idempotent and avoid cloning same repo many times # with RECLONE=yes _RYU_INSTALLED=${_RYU_INSTALLED:-False} -function install_ryu() { +function install_ryu { if [[ "$_RYU_INSTALLED" == "False" ]]; then git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH _RYU_INSTALLED=True fi } -function start_ryu() { +function start_ryu { screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --config-file $RYU_CONF" } -function stop_ryu() { +function stop_ryu { : } -function check_ryu() { +function check_ryu { : } diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema index 2b125646dc..d465ac753e 100644 --- a/lib/neutron_thirdparty/trema +++ b/lib/neutron_thirdparty/trema @@ -31,7 +31,7 @@ TREMA_SS_CONFIG=$TREMA_SS_ETC_DIR/sliceable.conf TREMA_SS_APACHE_CONFIG=/etc/apache2/sites-available/sliceable_switch.conf # configure_trema - Set config files, create data dirs, etc -function configure_trema() { +function configure_trema { # prepare dir for d in $TREMA_SS_ETC_DIR $TREMA_SS_DB_DIR $TREMA_SS_SCRIPT_DIR; do sudo mkdir -p $d @@ -41,7 +41,7 @@ function configure_trema() { } # init_trema - Initialize databases, etc. 
-function init_trema() { +function init_trema { local _pwd=$(pwd) # Initialize databases for Sliceable Switch @@ -70,7 +70,7 @@ function init_trema() { $TREMA_SS_CONFIG } -function gem_install() { +function gem_install { [[ "$OFFLINE" = "True" ]] && return [ -n "$RUBYGEMS_CMD" ] || get_gem_command @@ -79,7 +79,7 @@ function gem_install() { sudo $RUBYGEMS_CMD install $pkg } -function get_gem_command() { +function get_gem_command { # Trema requires ruby 1.8, so gem1.8 is checked first RUBYGEMS_CMD=$(which gem1.8 || which gem) if [ -z "$RUBYGEMS_CMD" ]; then @@ -87,7 +87,7 @@ function get_gem_command() { fi } -function install_trema() { +function install_trema { # Trema gem_install trema # Sliceable Switch @@ -97,7 +97,7 @@ function install_trema() { make -C $TREMA_DIR/apps/sliceable_switch } -function start_trema() { +function start_trema { # APACHE_NAME is defined in init_horizon (in lib/horizon) restart_service $APACHE_NAME @@ -105,11 +105,11 @@ function start_trema() { trema run -d -c $TREMA_SS_CONFIG } -function stop_trema() { +function stop_trema { sudo TREMA_TMP=$TREMA_TMP_DIR trema killall } -function check_trema() { +function check_trema { : } diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx index 4eb177a458..3fecc62560 100644 --- a/lib/neutron_thirdparty/vmware_nsx +++ b/lib/neutron_thirdparty/vmware_nsx @@ -22,11 +22,11 @@ NSX_GATEWAY_NETWORK_INTERFACE=${NSX_GATEWAY_NETWORK_INTERFACE:-eth2} # is invoked by unstack.sh FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} -function configure_vmware_nsx() { +function configure_vmware_nsx { : } -function init_vmware_nsx() { +function init_vmware_nsx { if ! is_set NSX_GATEWAY_NETWORK_CIDR; then NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address to set on br-ex was not specified. 
" @@ -52,15 +52,15 @@ function init_vmware_nsx() { sudo ip addr add dev $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_CIDR } -function install_vmware_nsx() { +function install_vmware_nsx { : } -function start_vmware_nsx() { +function start_vmware_nsx { : } -function stop_vmware_nsx() { +function stop_vmware_nsx { if ! is_set NSX_GATEWAY_NETWORK_CIDR; then NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address expected on br-ex was not specified. " @@ -78,7 +78,7 @@ function stop_vmware_nsx() { done } -function check_vmware_nsx() { +function check_vmware_nsx { neutron-check-nsx-config $NEUTRON_CONF_DIR/plugins/vmware/nsx.ini } diff --git a/lib/nova b/lib/nova index fefeda1236..90b1ba4fde 100644 --- a/lib/nova +++ b/lib/nova @@ -144,7 +144,7 @@ function is_n-cell_enabled { } # Helper to clean iptables rules -function clean_iptables() { +function clean_iptables { # Delete rules sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables",$0}' | bash # Delete nat rules @@ -157,7 +157,7 @@ function clean_iptables() { # cleanup_nova() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_nova() { +function cleanup_nova { if is_service_enabled n-cpu; then # Clean iptables from previous runs clean_iptables @@ -191,7 +191,7 @@ function cleanup_nova() { } # configure_nova_rootwrap() - configure Nova's rootwrap -function configure_nova_rootwrap() { +function configure_nova_rootwrap { # Deploy new rootwrap filters files (owned by root). # Wipe any existing rootwrap.d files first if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then @@ -219,7 +219,7 @@ function configure_nova_rootwrap() { } # configure_nova() - Set config files, create data dirs, etc -function configure_nova() { +function configure_nova { # Put config files in ``/etc/nova`` for everyone to find if [[ ! 
-d $NOVA_CONF_DIR ]]; then sudo mkdir -p $NOVA_CONF_DIR @@ -367,7 +367,7 @@ create_nova_accounts() { } # create_nova_conf() - Create a new nova.conf file -function create_nova_conf() { +function create_nova_conf { # Remove legacy ``nova.conf`` rm -f $NOVA_DIR/bin/nova.conf @@ -515,7 +515,7 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" } -function init_nova_cells() { +function init_nova_cells { if is_service_enabled n-cell; then cp $NOVA_CONF $NOVA_CELLS_CONF iniset $NOVA_CELLS_CONF DEFAULT sql_connection `database_connection_url $NOVA_CELLS_DB` @@ -542,14 +542,14 @@ function init_nova_cells() { } # create_nova_cache_dir() - Part of the init_nova() process -function create_nova_cache_dir() { +function create_nova_cache_dir { # Create cache dir sudo mkdir -p $NOVA_AUTH_CACHE_DIR sudo chown $STACK_USER $NOVA_AUTH_CACHE_DIR rm -f $NOVA_AUTH_CACHE_DIR/* } -function create_nova_conf_nova_network() { +function create_nova_conf_nova_network { iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER" iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE" iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE" @@ -560,14 +560,14 @@ function create_nova_conf_nova_network() { } # create_nova_keys_dir() - Part of the init_nova() process -function create_nova_keys_dir() { +function create_nova_keys_dir { # Create keys dir sudo mkdir -p ${NOVA_STATE_PATH}/keys sudo chown -R $STACK_USER ${NOVA_STATE_PATH} } # init_nova() - Initialize databases, etc. -function init_nova() { +function init_nova { # All nova components talk to a central database. # Only do this step once on the API node for an entire cluster. 
if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then @@ -596,14 +596,14 @@ function init_nova() { } # install_novaclient() - Collect source and prepare -function install_novaclient() { +function install_novaclient { git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH setup_develop $NOVACLIENT_DIR sudo install -D -m 0644 -o $STACK_USER {$NOVACLIENT_DIR/tools/,/etc/bash_completion.d/}nova.bash_completion } # install_nova() - Collect source and prepare -function install_nova() { +function install_nova { if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then install_nova_hypervisor fi @@ -638,7 +638,7 @@ function install_nova() { } # start_nova_api() - Start the API process ahead of other things -function start_nova_api() { +function start_nova_api { # Get right service port for testing local service_port=$NOVA_SERVICE_PORT if is_service_enabled tls-proxy; then @@ -658,7 +658,7 @@ function start_nova_api() { } # start_nova_compute() - Start the compute process -function start_nova_compute() { +function start_nova_compute { if is_service_enabled n-cell; then local compute_cell_conf=$NOVA_CELLS_CONF else @@ -693,7 +693,7 @@ function start_nova_compute() { } # start_nova() - Start running processes, including screen -function start_nova_rest() { +function start_nova_rest { local api_cell_conf=$NOVA_CONF if is_service_enabled n-cell; then local compute_cell_conf=$NOVA_CELLS_CONF @@ -722,13 +722,13 @@ function start_nova_rest() { screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore --config-file $api_cell_conf" } -function start_nova() { +function start_nova { start_nova_compute start_nova_rest } # stop_nova() - Stop running processes (non-screen) -function stop_nova() { +function stop_nova { # Kill the nova screen windows # Some services are listed here twice since more than one instance # of a service may be running in certain configs. 
diff --git a/lib/nova_plugins/hypervisor-baremetal b/lib/nova_plugins/hypervisor-baremetal index 660c977bde..2da1097027 100644 --- a/lib/nova_plugins/hypervisor-baremetal +++ b/lib/nova_plugins/hypervisor-baremetal @@ -33,13 +33,13 @@ STUB_NETWORK=${STUB_NETWORK:-False} # ------------ # clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor() { +function cleanup_nova_hypervisor { # This function intentionally left blank : } # configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor() { +function configure_nova_hypervisor { configure_baremetal_nova_dirs iniset $NOVA_CONF baremetal sql_connection `database_connection_url nova_bm` @@ -67,19 +67,19 @@ function configure_nova_hypervisor() { } # install_nova_hypervisor() - Install external components -function install_nova_hypervisor() { +function install_nova_hypervisor { # This function intentionally left blank : } # start_nova_hypervisor - Start any required external services -function start_nova_hypervisor() { +function start_nova_hypervisor { # This function intentionally left blank : } # stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor() { +function stop_nova_hypervisor { # This function intentionally left blank : } diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index b5df19db02..f8dc6afa19 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -44,7 +44,7 @@ DOCKER_APT_REPO=${DOCKER_APT_REPO:-https://get.docker.io/ubuntu} # ------------ # clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor() { +function cleanup_nova_hypervisor { stop_service docker # Clean out work area @@ -52,13 +52,13 @@ function cleanup_nova_hypervisor() { } # configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor() { +function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT 
compute_driver docker.DockerDriver iniset $GLANCE_API_CONF DEFAULT container_formats ami,ari,aki,bare,ovf,docker } # install_nova_hypervisor() - Install external components -function install_nova_hypervisor() { +function install_nova_hypervisor { # So far this is Ubuntu only if ! is_ubuntu; then die $LINENO "Docker is only supported on Ubuntu at this time" @@ -77,7 +77,7 @@ function install_nova_hypervisor() { } # start_nova_hypervisor - Start any required external services -function start_nova_hypervisor() { +function start_nova_hypervisor { local docker_pid read docker_pid <$DOCKER_PID_FILE if [[ -z $docker_pid ]] || ! ps -p $docker_pid | grep [d]ocker; then @@ -111,7 +111,7 @@ function start_nova_hypervisor() { } # stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor() { +function stop_nova_hypervisor { # Stop the docker registry container docker kill $(docker ps | grep docker-registry | cut -d' ' -f1) } diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake index fe0d1900ee..e7a833f806 100644 --- a/lib/nova_plugins/hypervisor-fake +++ b/lib/nova_plugins/hypervisor-fake @@ -27,13 +27,13 @@ set +o xtrace # ------------ # clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor() { +function cleanup_nova_hypervisor { # This function intentionally left blank : } # configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor() { +function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT compute_driver "nova.virt.fake.FakeDriver" # Disable arbitrary limits iniset $NOVA_CONF DEFAULT quota_instances -1 @@ -51,19 +51,19 @@ function configure_nova_hypervisor() { } # install_nova_hypervisor() - Install external components -function install_nova_hypervisor() { +function install_nova_hypervisor { # This function intentionally left blank : } # start_nova_hypervisor - Start any required external services -function start_nova_hypervisor() { +function 
start_nova_hypervisor { # This function intentionally left blank : } # stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor() { +function stop_nova_hypervisor { # This function intentionally left blank : } diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index a550600363..b39c57c74a 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -31,13 +31,13 @@ ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False} # ------------ # clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor() { +function cleanup_nova_hypervisor { # This function intentionally left blank : } # configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor() { +function configure_nova_hypervisor { if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces cat </dev/null; then echo "Found old oslo.config... removing to ensure consistency" diff --git a/lib/rpc_backend b/lib/rpc_backend index 34f576f5b8..a0424b1dee 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -25,7 +25,7 @@ set +o xtrace # Make sure we only have one rpc backend enabled. # Also check the specified rpc backend is available on your platform. -function check_rpc_backend() { +function check_rpc_backend { local rpc_needed=1 # We rely on the fact that filenames in lib/* match the service names # that can be passed as arguments to is_service_enabled. 
@@ -91,7 +91,7 @@ function cleanup_rpc_backend { } # install rpc backend -function install_rpc_backend() { +function install_rpc_backend { if is_service_enabled rabbit; then # Install rabbitmq-server # the temp file is necessary due to LP: #878600 @@ -135,7 +135,7 @@ function install_rpc_backend() { } # restart the rpc backend -function restart_rpc_backend() { +function restart_rpc_backend { if is_service_enabled rabbit; then # Start rabbitmq-server echo_summary "Starting RabbitMQ" @@ -165,7 +165,7 @@ function restart_rpc_backend() { } # iniset cofiguration -function iniset_rpc_backend() { +function iniset_rpc_backend { local package=$1 local file=$2 local section=$3 @@ -193,7 +193,7 @@ function iniset_rpc_backend() { # Check if qpid can be used on the current distro. # qpid_is_supported -function qpid_is_supported() { +function qpid_is_supported { if [[ -z "$DISTRO" ]]; then GetDistro fi diff --git a/lib/savanna b/lib/savanna index 954f0e711e..d7152b1e6f 100644 --- a/lib/savanna +++ b/lib/savanna @@ -55,7 +55,7 @@ TEMPEST_SERVICES+=,savanna # Tenant User Roles # ------------------------------ # service savanna admin -function create_savanna_accounts() { +function create_savanna_accounts { SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") @@ -88,14 +88,14 @@ function create_savanna_accounts() { # cleanup_savanna() - Remove residual data files, anything left over from # previous runs that would need to clean up. -function cleanup_savanna() { +function cleanup_savanna { # Cleanup auth cache dir sudo rm -rf $SAVANNA_AUTH_CACHE_DIR } # configure_savanna() - Set config files, create data dirs, etc -function configure_savanna() { +function configure_savanna { if [[ ! 
-d $SAVANNA_CONF_DIR ]]; then sudo mkdir -p $SAVANNA_CONF_DIR @@ -142,18 +142,18 @@ function configure_savanna() { } # install_savanna() - Collect source and prepare -function install_savanna() { +function install_savanna { git_clone $SAVANNA_REPO $SAVANNA_DIR $SAVANNA_BRANCH setup_develop $SAVANNA_DIR } # start_savanna() - Start running processes, including screen -function start_savanna() { +function start_savanna { screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api --config-file $SAVANNA_CONF_FILE" } # stop_savanna() - Stop running processes -function stop_savanna() { +function stop_savanna { # Kill the Savanna screen windows screen -S $SCREEN_NAME -p savanna -X kill } diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard index 691b23f6e8..6fe15a3c81 100644 --- a/lib/savanna-dashboard +++ b/lib/savanna-dashboard @@ -35,7 +35,7 @@ SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient # Functions # --------- -function configure_savanna_dashboard() { +function configure_savanna_dashboard { echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)" >> $HORIZON_DIR/openstack_dashboard/settings.py @@ -47,19 +47,19 @@ function configure_savanna_dashboard() { } # install_savanna_dashboard() - Collect source and prepare -function install_savanna_dashboard() { +function install_savanna_dashboard { install_python_savannaclient git_clone $SAVANNA_DASHBOARD_REPO $SAVANNA_DASHBOARD_DIR $SAVANNA_DASHBOARD_BRANCH setup_develop $SAVANNA_DASHBOARD_DIR } -function install_python_savannaclient() { +function install_python_savannaclient { git_clone $SAVANNA_PYTHONCLIENT_REPO $SAVANNA_PYTHONCLIENT_DIR $SAVANNA_PYTHONCLIENT_BRANCH setup_develop $SAVANNA_PYTHONCLIENT_DIR } # Cleanup file settings.py from Savanna -function cleanup_savanna_dashboard() { +function cleanup_savanna_dashboard { sed -i '/savanna/d' $HORIZON_DIR/openstack_dashboard/settings.py } diff --git 
a/lib/stackforge b/lib/stackforge index 5fa4570b74..dca08cc2c2 100644 --- a/lib/stackforge +++ b/lib/stackforge @@ -34,7 +34,7 @@ PECAN_DIR=$DEST/pecan # ------------ # install_stackforge() - Collect source and prepare -function install_stackforge() { +function install_stackforge { # TODO(sdague): remove this once we get to Icehouse, this just makes # for a smoother transition of existing users. cleanup_stackforge @@ -47,7 +47,7 @@ function install_stackforge() { } # cleanup_stackforge() - purge possibly old versions of stackforge libraries -function cleanup_stackforge() { +function cleanup_stackforge { # this means we've got an old version installed, lets get rid of it # otherwise python hates itself for lib in wsme pecan; do diff --git a/lib/swift b/lib/swift index 6c33af5082..59c1e54d8a 100644 --- a/lib/swift +++ b/lib/swift @@ -126,7 +126,7 @@ function is_swift_enabled { } # cleanup_swift() - Remove residual data files -function cleanup_swift() { +function cleanup_swift { rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 @@ -141,7 +141,7 @@ function cleanup_swift() { } # _cleanup_swift_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file -function _cleanup_swift_apache_wsgi() { +function _cleanup_swift_apache_wsgi { sudo rm -f $SWIFT_APACHE_WSGI_DIR/*.wsgi disable_apache_site proxy-server for node_number in ${SWIFT_REPLICAS_SEQ}; do @@ -154,7 +154,7 @@ function _cleanup_swift_apache_wsgi() { } # _config_swift_apache_wsgi() - Set WSGI config files of Swift -function _config_swift_apache_wsgi() { +function _config_swift_apache_wsgi { sudo mkdir -p ${SWIFT_APACHE_WSGI_DIR} local apache_vhost_dir=/etc/${APACHE_NAME}/$APACHE_CONF_DIR local proxy_port=${SWIFT_DEFAULT_BIND_PORT:-8080} @@ -233,7 +233,7 @@ function _config_swift_apache_wsgi() { # This function generates an object/container/account configuration # 
emulating 4 nodes on different ports -function generate_swift_config() { +function generate_swift_config { local swift_node_config=$1 local node_id=$2 local bind_port=$3 @@ -272,7 +272,7 @@ function generate_swift_config() { # configure_swift() - Set config files, create data dirs and loop image -function configure_swift() { +function configure_swift { local swift_pipeline="${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH}" local node_number local swift_node_config @@ -460,7 +460,7 @@ EOF } # create_swift_disk - Create Swift backing disk -function create_swift_disk() { +function create_swift_disk { local node_number # First do a bit of setup by creating the directories and @@ -520,7 +520,7 @@ function create_swift_disk() { # swifttenanttest1 swiftusertest3 anotherrole # swifttenanttest2 swiftusertest2 admin -function create_swift_accounts() { +function create_swift_accounts { # Defines specific passwords used by tools/create_userrc.sh SWIFTUSERTEST1_PASSWORD=testing SWIFTUSERTEST2_PASSWORD=testing2 @@ -578,7 +578,7 @@ function create_swift_accounts() { } # init_swift() - Initialize rings -function init_swift() { +function init_swift { local node_number # Make sure to kill all swift processes first swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true @@ -612,7 +612,7 @@ function init_swift() { rm -f $SWIFT_AUTH_CACHE_DIR/* } -function install_swift() { +function install_swift { git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH setup_develop $SWIFT_DIR if is_apache_enabled_service swift; then @@ -620,13 +620,13 @@ function install_swift() { fi } -function install_swiftclient() { +function install_swiftclient { git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH setup_develop $SWIFTCLIENT_DIR } # start_swift() - Start running processes, including screen -function start_swift() { +function start_swift { # (re)start rsyslog restart_service rsyslog # (re)start memcached to make sure we have a clean memcache. 
@@ -674,7 +674,7 @@ function start_swift() { } # stop_swift() - Stop running processes (non-screen) -function stop_swift() { +function stop_swift { if is_apache_enabled_service swift; then swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0 diff --git a/lib/tempest b/lib/tempest index 410c80c46d..16f8744d85 100644 --- a/lib/tempest +++ b/lib/tempest @@ -70,7 +70,7 @@ IPV6_ENABLED=$(trueorfalse True $IPV6_ENABLED) # --------- # configure_tempest() - Set config files, create data dirs, etc -function configure_tempest() { +function configure_tempest { setup_develop $TEMPEST_DIR local image_lines local images @@ -359,12 +359,12 @@ function configure_tempest() { } # install_tempest() - Collect source and prepare -function install_tempest() { +function install_tempest { git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH } # init_tempest() - Initialize ec2 images -function init_tempest() { +function init_tempest { local base_image_name=cirros-0.3.1-x86_64 # /opt/stack/devstack/files/images/cirros-0.3.1-x86_64-uec local image_dir="$FILES/images/${base_image_name}-uec" diff --git a/lib/template b/lib/template index b8e7c4d86f..efe5826f15 100644 --- a/lib/template +++ b/lib/template @@ -45,7 +45,7 @@ function is_XXXX_enabled { # cleanup_XXXX() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_XXXX() { +function cleanup_XXXX { # kill instances (nova) # delete image files (glance) # This function intentionally left blank @@ -53,7 +53,7 @@ function cleanup_XXXX() { } # configure_XXXX() - Set config files, create data dirs, etc -function configure_XXXX() { +function configure_XXXX { # sudo python setup.py deploy # iniset $XXXX_CONF ... # This function intentionally left blank @@ -61,26 +61,26 @@ function configure_XXXX() { } # init_XXXX() - Initialize databases, etc. 
-function init_XXXX() { +function init_XXXX { # clean up from previous (possibly aborted) runs # create required data files : } # install_XXXX() - Collect source and prepare -function install_XXXX() { +function install_XXXX { # git clone xxx : } # start_XXXX() - Start running processes, including screen -function start_XXXX() { +function start_XXXX { # screen_it XXXX "cd $XXXX_DIR && $XXXX_DIR/bin/XXXX-bin" : } # stop_XXXX() - Stop running processes (non-screen) -function stop_XXXX() { +function stop_XXXX { # FIXME(dtroyer): stop only our screen screen window? : } diff --git a/lib/tls b/lib/tls index 6134fa1bad..072059d599 100644 --- a/lib/tls +++ b/lib/tls @@ -61,7 +61,7 @@ STUD_CIPHERS='TLSv1+HIGH:!DES:!aNULL:!eNULL:@STRENGTH' OPENSSL=${OPENSSL:-/usr/bin/openssl} # Do primary CA configuration -function configure_CA() { +function configure_CA { # build common config file # Verify ``TLS_IP`` is good @@ -73,7 +73,7 @@ function configure_CA() { # Creates a new CA directory structure # create_CA_base ca-dir -function create_CA_base() { +function create_CA_base { local ca_dir=$1 if [[ -d $ca_dir ]]; then @@ -92,7 +92,7 @@ function create_CA_base() { # Create a new CA configuration file # create_CA_config ca-dir common-name -function create_CA_config() { +function create_CA_config { local ca_dir=$1 local common_name=$2 @@ -145,7 +145,7 @@ keyUsage = cRLSign, keyCertSign # Create a new signing configuration file # create_signing_config ca-dir -function create_signing_config() { +function create_signing_config { local ca_dir=$1 echo " @@ -225,7 +225,7 @@ function init_cert { # make_cert creates and signs a new certificate with the given commonName and CA # make_cert ca-dir cert-name "common-name" ["alt-name" ...] 
-function make_cert() { +function make_cert { local ca_dir=$1 local cert_name=$2 local common_name=$3 @@ -261,7 +261,7 @@ function make_cert() { # Make an intermediate CA to sign everything else # make_int_CA ca-dir signing-ca-dir -function make_int_CA() { +function make_int_CA { local ca_dir=$1 local signing_ca_dir=$2 @@ -291,7 +291,7 @@ function make_int_CA() { # Make a root CA to sign other CAs # make_root_CA ca-dir -function make_root_CA() { +function make_root_CA { local ca_dir=$1 # Create the root CA @@ -319,7 +319,7 @@ function make_root_CA() { # is a short-circuit boolean, i.e it returns on the first match. # # Uses global ``SSL_ENABLED_SERVICES`` -function is_ssl_enabled_service() { +function is_ssl_enabled_service { services=$@ for service in ${services}; do [[ ,${SSL_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 @@ -337,7 +337,7 @@ function is_ssl_enabled_service() { # example for keystone this would be KEYSTONE_SSL_CERT, KEYSTONE_SSL_KEY and # KEYSTONE_SSL_CA. If it does not find these certificates the program will # quit. 
-function ensure_certificates() { +function ensure_certificates { local service=$1 local cert_var="${service}_SSL_CERT" @@ -362,7 +362,7 @@ function ensure_certificates() { # Starts the TLS proxy for the given IP/ports # start_tls_proxy front-host front-port back-host back-port -function start_tls_proxy() { +function start_tls_proxy { local f_host=$1 local f_port=$2 local b_host=$3 diff --git a/lib/trove b/lib/trove index 6834149c64..75b990f91e 100644 --- a/lib/trove +++ b/lib/trove @@ -53,7 +53,7 @@ function is_trove_enabled { } # setup_trove_logging() - Adds logging configuration to conf files -function setup_trove_logging() { +function setup_trove_logging { local CONF=$1 iniset $CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $CONF DEFAULT use_syslog $SYSLOG @@ -69,7 +69,7 @@ function setup_trove_logging() { # ------------------------------------------------------------------ # service trove admin # if enabled -create_trove_accounts() { +function create_trove_accounts { # Trove SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") SERVICE_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") @@ -106,19 +106,19 @@ create_trove_accounts() { # cleanup_trove() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_trove() { +function cleanup_trove { #Clean up dirs rm -fr $TROVE_AUTH_CACHE_DIR/* rm -fr $TROVE_CONF_DIR/* } # configure_troveclient() - Set config files, create data dirs, etc -function configure_troveclient() { +function configure_troveclient { setup_develop $TROVECLIENT_DIR } # configure_trove() - Set config files, create data dirs, etc -function configure_trove() { +function configure_trove { setup_develop $TROVE_DIR # Create the trove conf dir and cache dirs if they don't exist @@ -182,17 +182,17 @@ function configure_trove() { } # install_troveclient() - Collect source and prepare -function install_troveclient() { +function 
install_troveclient { git_clone $TROVECLIENT_REPO $TROVECLIENT_DIR $TROVECLIENT_BRANCH } # install_trove() - Collect source and prepare -function install_trove() { +function install_trove { git_clone $TROVE_REPO $TROVE_DIR $TROVE_BRANCH } # init_trove() - Initializes Trove Database as a Service -function init_trove() { +function init_trove { #(Re)Create trove db recreate_database trove utf8 @@ -201,14 +201,14 @@ function init_trove() { } # start_trove() - Start running processes, including screen -function start_trove() { +function start_trove { screen_it tr-api "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1" screen_it tr-tmgr "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1" screen_it tr-cond "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug 2>&1" } # stop_trove() - Stop running processes -function stop_trove() { +function stop_trove { # Kill the trove screen windows for serv in tr-api tr-tmgr tr-cond; do screen_stop $serv diff --git a/stack.sh b/stack.sh index ac89e52515..a70267984c 100755 --- a/stack.sh +++ b/stack.sh @@ -464,7 +464,7 @@ fi # ----------------- # Draw a spinner so the user knows something is happening -function spinner() { +function spinner { local delay=0.75 local spinstr='/-\|' printf "..." >&3 @@ -479,7 +479,7 @@ function spinner() { # Echo text to the log file, summary log file and stdout # echo_summary "something to say" -function echo_summary() { +function echo_summary { if [[ -t 3 && "$VERBOSE" != "True" ]]; then kill >/dev/null 2>&1 $LAST_SPINNER_PID if [ ! 
-z "$LAST_SPINNER_PID" ]; then @@ -495,7 +495,7 @@ function echo_summary() { # Echo text only to stdout, no log files # echo_nolog "something not for the logs" -function echo_nolog() { +function echo_nolog { echo $@ >&3 } diff --git a/tests/functions.sh b/tests/functions.sh index 06a4134abf..874d02230d 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -42,7 +42,7 @@ fi echo "Testing enable_service()" -function test_enable_service() { +function test_enable_service { local start="$1" local add="$2" local finish="$3" @@ -68,7 +68,7 @@ test_enable_service 'a,b,c' c 'a,b,c' test_enable_service 'a,b,-c' c 'a,b' test_enable_service 'a,b,c' -c 'a,b' -function test_disable_service() { +function test_disable_service { local start="$1" local del="$2" local finish="$3" @@ -109,7 +109,7 @@ fi echo "Testing disable_negated_services()" -function test_disable_negated_services() { +function test_disable_negated_services { local start="$1" local finish="$2" diff --git a/tests/test_config.sh b/tests/test_config.sh index 39603c9dbe..5700f8df29 100755 --- a/tests/test_config.sh +++ b/tests/test_config.sh @@ -12,7 +12,7 @@ source $TOP/lib/config # check_result() tests and reports the result values # check_result "actual" "expected" -function check_result() { +function check_result { local actual=$1 local expected=$2 if [[ "$actual" == "$expected" ]]; then @@ -26,7 +26,7 @@ TEST_1C_ADD="[eee] type=new multi = foo2" -function create_test1c() { +function create_test1c { cat >test1c.conf <test2a.conf <\w+)", line) @@ -169,6 +184,7 @@ def check_files(files, verbose): check_indents(logical_line) check_for_do(logical_line) check_if_then(logical_line) + check_function_decl(logical_line) prev_line = logical_line prev_lineno = fileinput.filelineno() diff --git a/tools/build_pxe_env.sh b/tools/build_pxe_env.sh index e6f98b4b75..50d91d063c 100755 --- a/tools/build_pxe_env.sh +++ b/tools/build_pxe_env.sh @@ -17,7 +17,7 @@ PXEDIR=${PXEDIR:-/opt/ramstack/pxe} PROGDIR=`dirname $0` # Clean up 
any resources that may be in use -cleanup() { +function cleanup { set +o errexit # Mop up temporary files diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index 737255578a..50ba8ef2ca 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -14,7 +14,7 @@ if [ ! "$#" -eq "1" ]; then fi # Clean up any resources that may be in use -cleanup() { +function cleanup { set +o errexit # Mop up temporary files @@ -87,7 +87,7 @@ fi # Finds and returns full device path for the next available NBD device. # Exits script if error connecting or none free. # map_nbd image -function map_nbd() { +function map_nbd { for i in `seq 0 15`; do if [ ! -e /sys/block/nbd$i/pid ]; then NBD=/dev/nbd$i diff --git a/tools/build_uec_ramdisk.sh b/tools/build_uec_ramdisk.sh index 3ab5dafdcb..5f3acc5684 100755 --- a/tools/build_uec_ramdisk.sh +++ b/tools/build_uec_ramdisk.sh @@ -20,7 +20,7 @@ if ! egrep -q "oneiric" /etc/lsb-release; then fi # Clean up resources that may be in use -cleanup() { +function cleanup { set +o errexit if [ -n "$MNT_DIR" ]; then diff --git a/tools/build_usb_boot.sh b/tools/build_usb_boot.sh index 8566229833..c97e0a143d 100755 --- a/tools/build_usb_boot.sh +++ b/tools/build_usb_boot.sh @@ -13,7 +13,7 @@ DEST_DIR=${1:-/tmp/syslinux-boot} PXEDIR=${PXEDIR:-/opt/ramstack/pxe} # Clean up any resources that may be in use -cleanup() { +function cleanup { set +o errexit # Mop up temporary files diff --git a/tools/copy_dev_environment_to_uec.sh b/tools/copy_dev_environment_to_uec.sh index 3fd4423f86..94a4926668 100755 --- a/tools/copy_dev_environment_to_uec.sh +++ b/tools/copy_dev_environment_to_uec.sh @@ -22,7 +22,7 @@ cd $TOP_DIR source ./stackrc # Echo usage -usage() { +function usage { echo "Add stack user and keys" echo "" echo "Usage: $0 [full path to raw uec base image]" diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index cd5a1c9643..47da3341b8 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -11,8 +11,7 @@ set -o xtrace 
ACCOUNT_DIR=./accrc -display_help() -{ +function display_help { cat < @@ -151,7 +150,7 @@ if ! nova x509-get-root-cert "$EUCALYPTUS_CERT"; then fi -function add_entry(){ +function add_entry { local user_id=$1 local user_name=$2 local tenant_id=$3 @@ -213,7 +212,7 @@ EOF } #admin users expected -function create_or_get_tenant(){ +function create_or_get_tenant { local tenant_name=$1 local tenant_id=`keystone tenant-list | awk '/\|[[:space:]]*'"$tenant_name"'[[:space:]]*\|.*\|/ {print $2}'` if [ -n "$tenant_id" ]; then @@ -223,7 +222,7 @@ function create_or_get_tenant(){ fi } -function create_or_get_role(){ +function create_or_get_role { local role_name=$1 local role_id=`keystone role-list| awk '/\|[[:space:]]*'"$role_name"'[[:space:]]*\|/ {print $2}'` if [ -n "$role_id" ]; then @@ -234,7 +233,7 @@ function create_or_get_role(){ } # Provides empty string when the user does not exists -function get_user_id(){ +function get_user_id { local user_name=$1 keystone user-list | awk '/^\|[^|]*\|[[:space:]]*'"$user_name"'[[:space:]]*\|.*\|/ {print $2}' } diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 47b0cd10cd..7833278a12 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -40,7 +40,7 @@ FILES=$TOP_DIR/files # --------------- # get_package_path python-package # in import notation -function get_package_path() { +function get_package_path { local package=$1 echo $(python -c "import os; import $package; print(os.path.split(os.path.realpath($package.__file__))[0])") } diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh index da13f4b875..225742c041 100755 --- a/tools/get_uec_image.sh +++ b/tools/get_uec_image.sh @@ -18,7 +18,7 @@ TOP_DIR=$(cd $TOOLS_DIR/..; pwd) set -o errexit set -o xtrace -usage() { +function usage { echo "Usage: $0 - Download and prepare Ubuntu UEC images" echo "" echo "$0 [-r rootsize] release imagefile [kernel]" @@ -31,7 +31,7 @@ usage() { } # Clean up any resources that may be in use -cleanup() { +function cleanup { set +o 
errexit # Mop up temporary files diff --git a/tools/info.sh b/tools/info.sh index 1e521b9c4b..a8f9544073 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -61,7 +61,7 @@ fi # ----- # git_report -function git_report() { +function git_report { local dir=$1 local proj ref branch head if [[ -d $dir/.git ]]; then diff --git a/tools/install_openvpn.sh b/tools/install_openvpn.sh index 2f52aa14d0..9a4f0369d5 100755 --- a/tools/install_openvpn.sh +++ b/tools/install_openvpn.sh @@ -22,7 +22,7 @@ if [ -e vpnrc ]; then fi # Do some IP manipulation -function cidr2netmask() { +function cidr2netmask { set -- $(( 5 - ($1 / 8) )) 255 255 255 255 $(( (255 << (8 - ($1 % 8))) & 255 )) 0 0 0 if [[ $1 -gt 1 ]]; then shift $1 @@ -50,7 +50,7 @@ VPN_CLIENT_DHCP="${VPN_CLIENT_DHCP:-net.1 net.254}" VPN_DIR=/etc/openvpn CA_DIR=$VPN_DIR/easy-rsa -usage() { +function usage { echo "$0 - OpenVPN install and certificate generation" echo "" echo "$0 --client name" @@ -102,7 +102,7 @@ if [ ! -r $CA_DIR/keys/dh1024.pem ]; then openvpn --genkey --secret $CA_DIR/keys/ta.key ## Build a TLS key fi -do_server() { +function do_server { NAME=$1 # Generate server certificate $CA_DIR/pkitool --server $NAME @@ -162,7 +162,7 @@ EOF /etc/init.d/openvpn restart } -do_client() { +function do_client { NAME=$1 # Generate a client certificate $CA_DIR/pkitool $NAME diff --git a/tools/install_pip.sh b/tools/install_pip.sh index d714d33530..9fa161e043 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -50,7 +50,7 @@ PIP_TAR_URL=https://pypi.python.org/packages/source/p/pip/pip-$INSTALL_PIP_VERSI GetDistro echo "Distro: $DISTRO" -function get_versions() { +function get_versions { PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || true) if [[ -n $PIP ]]; then PIP_VERSION=$($PIP --version | awk '{ print $2}') @@ -61,7 +61,7 @@ function get_versions() { } -function install_get_pip() { +function install_get_pip { if [[ ! 
-r $FILES/get-pip.py ]]; then (cd $FILES; \ curl -O $PIP_GET_PIP_URL; \ @@ -70,7 +70,7 @@ function install_get_pip() { sudo -E python $FILES/get-pip.py } -function install_pip_tarball() { +function install_pip_tarball { (cd $FILES; \ curl -O $PIP_TAR_URL; \ tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz 1>/dev/null; \ diff --git a/tools/jenkins/build_configuration.sh b/tools/jenkins/build_configuration.sh index e295ef2017..64ee159651 100755 --- a/tools/jenkins/build_configuration.sh +++ b/tools/jenkins/build_configuration.sh @@ -5,7 +5,7 @@ CONFIGURATION=$2 ADAPTER=$3 RC=$4 -function usage() { +function usage { echo "Usage: $0 - Build a configuration" echo "" echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh index d9a160ad76..6927fd7c29 100755 --- a/tools/jenkins/configurations/kvm.sh +++ b/tools/jenkins/configurations/kvm.sh @@ -9,7 +9,7 @@ CONFIGURATION=$2 ADAPTER=$3 RC=$4 -function usage() { +function usage { echo "Usage: $0 - Build a test configuration" echo "" echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" diff --git a/tools/jenkins/configurations/xs.sh b/tools/jenkins/configurations/xs.sh index 864f949114..7b671e9df4 100755 --- a/tools/jenkins/configurations/xs.sh +++ b/tools/jenkins/configurations/xs.sh @@ -8,7 +8,7 @@ CONFIGURATION=$2 ADAPTER=$3 RC=$4 -function usage() { +function usage { echo "Usage: $0 - Build a test configuration" echo "" echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" diff --git a/tools/jenkins/run_test.sh b/tools/jenkins/run_test.sh index 464956375e..d2b82843b4 100755 --- a/tools/jenkins/run_test.sh +++ b/tools/jenkins/run_test.sh @@ -4,7 +4,7 @@ EXECUTOR_NUMBER=$1 ADAPTER=$2 RC=$3 -function usage() { +function usage { echo "Usage: $0 - Run a test" echo "" echo "$0 [EXECUTOR_NUMBER] [ADAPTER] [RC (optional)]" diff --git a/tools/warm_apts_for_uec.sh b/tools/warm_apts_for_uec.sh index 
3c15f52ee3..c57fc2e59c 100755 --- a/tools/warm_apts_for_uec.sh +++ b/tools/warm_apts_for_uec.sh @@ -16,7 +16,7 @@ TOP_DIR=`cd $TOOLS_DIR/..; pwd` cd $TOP_DIR # Echo usage -usage() { +function usage { echo "Cache OpenStack dependencies on a uec image to speed up performance." echo "" echo "Usage: $0 [full path to raw uec base image]" diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index fbbfd6fbe5..cc3cbe18d1 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -42,7 +42,7 @@ source xenrc # GUEST_NAME="$1" -function _print_interface_config() { +function _print_interface_config { local device_nr local ip_address local netmask @@ -68,7 +68,7 @@ function _print_interface_config() { echo " post-up ethtool -K $device tx off" } -function print_interfaces_config() { +function print_interfaces_config { echo "auto lo" echo "iface lo inet loopback" diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 7b59bae6b8..a4b3e06e88 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -166,7 +166,7 @@ TNAME="jeos_template_for_devstack" SNAME_TEMPLATE="jeos_snapshot_for_devstack" SNAME_FIRST_BOOT="before_first_boot" -function wait_for_VM_to_halt() { +function wait_for_VM_to_halt { set +x echo "Waiting for the VM to halt. 
Progress in-VM can be checked with vncviewer:" mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.') @@ -318,7 +318,7 @@ xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_FIRST_BOOT" # xe vm-start vm="$GUEST_NAME" -function ssh_no_check() { +function ssh_no_check { ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$@" } @@ -349,7 +349,7 @@ DOMID=$(get_domid "$GUEST_NAME") xenstore-write /local/domain/$DOMID/authorized_keys/$DOMZERO_USER "$(cat /root/dom0key.pub)" xenstore-chmod -u /local/domain/$DOMID/authorized_keys/$DOMZERO_USER r$DOMID -function run_on_appliance() { +function run_on_appliance { ssh \ -i /root/dom0key \ -o UserKnownHostsFile=/dev/null \ diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index 094612624b..440774ec5b 100755 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -21,7 +21,7 @@ STACK_USER="$3" DOMZERO_USER="$4" -function setup_domzero_user() { +function setup_domzero_user { local username username="$1" From e2907b4838230940a8ff1735feffd80acf13bdab Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 26 Feb 2014 17:35:37 -0600 Subject: [PATCH 0518/4438] Fix Neutron enabled check * Remove the check for neutron enabled on a block of variable settings, there is no conflict and serves no purpose. * Also floating_ips.sh and volume.sh needed to properly source lib/neutron for do ping_check() to work properly. The current error in check-devstack-dsvm-neutron is not related to this fix. 
Change-Id: I1c458aaa787ffb98c945aefc3afa80c6861a405f --- exercises/floating_ips.sh | 6 +- exercises/volumes.sh | 7 +- lib/neutron | 154 +++++++++++++++++++------------------- 3 files changed, 84 insertions(+), 83 deletions(-) diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index b981aa8294..8dc44effbc 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -27,12 +27,12 @@ TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions source $TOP_DIR/functions -# Import project functions -source $TOP_DIR/lib/neutron - # Import configuration source $TOP_DIR/openrc +# Import project functions +source $TOP_DIR/lib/neutron + # Import exercise configuration source $TOP_DIR/exerciserc diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 33e24589eb..83d25c779c 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -27,12 +27,13 @@ TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions source $TOP_DIR/functions -# Import project functions -source $TOP_DIR/lib/cinder - # Import configuration source $TOP_DIR/openrc +# Import project functions +source $TOP_DIR/lib/cinder +source $TOP_DIR/lib/neutron + # Import exercise configuration source $TOP_DIR/exerciserc diff --git a/lib/neutron b/lib/neutron index df276c71d5..be123adcd5 100644 --- a/lib/neutron +++ b/lib/neutron @@ -59,10 +59,6 @@ # LinuxBridge plugin, please see the top level README file under the # Neutron section. 
-# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - # Neutron Network Configuration # ----------------------------- @@ -127,82 +123,81 @@ Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-False} # See _configure_neutron_common() for details about setting it up declare -a Q_PLUGIN_EXTRA_CONF_FILES -if is_service_enabled neutron; then - Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf - if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - Q_RR_COMMAND="sudo" - else - NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) - Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" - fi - - # Provider Network Configurations - # -------------------------------- - - # The following variables control the Neutron openvswitch and - # linuxbridge plugins' allocation of tenant networks and - # availability of provider networks. If these are not configured - # in ``localrc``, tenant networks will be local to the host (with no - # remote connectivity), and no physical resources will be - # available for the allocation of provider networks. - - # To use GRE tunnels for tenant networks, set to True in - # ``localrc``. GRE tunnels are only supported by the openvswitch - # plugin, and currently only on Ubuntu. - ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False} - - # If using GRE tunnels for tenant networks, specify the range of - # tunnel IDs from which tenant networks are allocated. Can be - # overriden in ``localrc`` in necesssary. - TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000} - - # To use VLANs for tenant networks, set to True in localrc. VLANs - # are supported by the openvswitch and linuxbridge plugins, each - # requiring additional configuration described below. - ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} - - # If using VLANs for tenant networks, set in ``localrc`` to specify - # the range of VLAN VIDs from which tenant networks are - # allocated. 
An external network switch must be configured to - # trunk these VLANs between hosts for multi-host connectivity. - # - # Example: ``TENANT_VLAN_RANGE=1000:1999`` - TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} - - # If using VLANs for tenant networks, or if using flat or VLAN - # provider networks, set in ``localrc`` to the name of the physical - # network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the - # openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge - # agent, as described below. - # - # Example: ``PHYSICAL_NETWORK=default`` - PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} - - # With the openvswitch plugin, if using VLANs for tenant networks, - # or if using flat or VLAN provider networks, set in ``localrc`` to - # the name of the OVS bridge to use for the physical network. The - # bridge will be created if it does not already exist, but a - # physical interface must be manually added to the bridge as a - # port for external connectivity. - # - # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` - OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} - - # With the linuxbridge plugin, if using VLANs for tenant networks, - # or if using flat or VLAN provider networks, set in ``localrc`` to - # the name of the network interface to use for the physical - # network. - # - # Example: ``LB_PHYSICAL_INTERFACE=eth1`` - LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} - # With the openvswitch plugin, set to True in ``localrc`` to enable - # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. 
- # - # Example: ``OVS_ENABLE_TUNNELING=True`` - OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} +Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf +if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + Q_RR_COMMAND="sudo" +else + NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) + Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" fi +# Provider Network Configurations +# -------------------------------- + +# The following variables control the Neutron openvswitch and +# linuxbridge plugins' allocation of tenant networks and +# availability of provider networks. If these are not configured +# in ``localrc``, tenant networks will be local to the host (with no +# remote connectivity), and no physical resources will be +# available for the allocation of provider networks. + +# To use GRE tunnels for tenant networks, set to True in +# ``localrc``. GRE tunnels are only supported by the openvswitch +# plugin, and currently only on Ubuntu. +ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False} + +# If using GRE tunnels for tenant networks, specify the range of +# tunnel IDs from which tenant networks are allocated. Can be +# overriden in ``localrc`` in necesssary. +TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000} + +# To use VLANs for tenant networks, set to True in localrc. VLANs +# are supported by the openvswitch and linuxbridge plugins, each +# requiring additional configuration described below. +ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} + +# If using VLANs for tenant networks, set in ``localrc`` to specify +# the range of VLAN VIDs from which tenant networks are +# allocated. An external network switch must be configured to +# trunk these VLANs between hosts for multi-host connectivity. 
+# +# Example: ``TENANT_VLAN_RANGE=1000:1999`` +TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} + +# If using VLANs for tenant networks, or if using flat or VLAN +# provider networks, set in ``localrc`` to the name of the physical +# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the +# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge +# agent, as described below. +# +# Example: ``PHYSICAL_NETWORK=default`` +PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} + +# With the openvswitch plugin, if using VLANs for tenant networks, +# or if using flat or VLAN provider networks, set in ``localrc`` to +# the name of the OVS bridge to use for the physical network. The +# bridge will be created if it does not already exist, but a +# physical interface must be manually added to the bridge as a +# port for external connectivity. +# +# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` +OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} + +# With the linuxbridge plugin, if using VLANs for tenant networks, +# or if using flat or VLAN provider networks, set in ``localrc`` to +# the name of the network interface to use for the physical +# network. +# +# Example: ``LB_PHYSICAL_INTERFACE=eth1`` +LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} + +# With the openvswitch plugin, set to True in ``localrc`` to enable +# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. +# +# Example: ``OVS_ENABLE_TUNNELING=True`` +OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} + # Neutron plugin specific functions # --------------------------------- @@ -241,6 +236,11 @@ fi TEMPEST_SERVICES+=,neutron +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + # Functions # --------- From 1237922b655d8ab1690b88c718d7002415ce1201 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 27 Feb 2014 17:16:46 -0500 Subject: [PATCH 0519/4438] make service_check fatal if we fail service check, we should do so in a fatal way, because something is not right. 
This will be very useful in grenade. Change-Id: I18811b0d8e6d06f364685c366cdc8f5dda3b8f7e --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 2248fbb610..ab7bc2139b 100644 --- a/functions-common +++ b/functions-common @@ -1135,7 +1135,7 @@ function service_check() { done if [ -n "$failures" ]; then - echo "More details about the above errors can be found with screen, with ./rejoin-stack.sh" + die $LINENO "More details about the above errors can be found with screen, with ./rejoin-stack.sh" fi } From c03f975150bf97b5aef42daa77fc419a9e241123 Mon Sep 17 00:00:00 2001 From: sukhdev Date: Thu, 27 Feb 2014 14:17:44 -0800 Subject: [PATCH 0520/4438] devstack (stack.sh) fails when extra config files are specified Latest merge of https://review.openstack.org/#/c/71996/ exposes an issue in stack.sh which did not surface before. Please see the details of the issue in the bug description. Closes bug: 1285884 Change-Id: Ie231c9835497c2a418a61d339dfd5df1aab9e3d7 --- lib/neutron | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index df276c71d5..07b21af336 100644 --- a/lib/neutron +++ b/lib/neutron @@ -586,11 +586,9 @@ function _configure_neutron_common() { # If additional config files exist, copy them over to neutron configuration # directory if [[ $Q_PLUGIN_EXTRA_CONF_PATH != '' ]]; then - mkdir -p /$Q_PLUGIN_EXTRA_CONF_PATH local f for (( f=0; $f < ${#Q_PLUGIN_EXTRA_CONF_FILES[@]}; f+=1 )); do Q_PLUGIN_EXTRA_CONF_FILES[$f]=$Q_PLUGIN_EXTRA_CONF_PATH/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} - cp $NEUTRON_DIR/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} /${Q_PLUGIN_EXTRA_CONF_FILES[$f]} done fi From 531aeb7900fd7f24794efb8f9da5fce65dc80f4b Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 28 Feb 2014 11:24:29 +1100 Subject: [PATCH 0521/4438] Preinstall yum-utils; move sudo check before install Some cloud images don't have yum-utils installed, so the call to yum-config-manager fails. 
Pre-install it (I still think it's easier than fiddling config files). Also, these repo setup steps are using sudo, but the root/sudo checks happen after this. Move them up before we start trying to do repo/package installs. Change-Id: I875e1f0663c9badc00278b2cc1a3b04ca3dde9fc --- stack.sh | 91 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 46 insertions(+), 45 deletions(-) diff --git a/stack.sh b/stack.sh index ac89e52515..669209c865 100755 --- a/stack.sh +++ b/stack.sh @@ -161,9 +161,42 @@ fi # Set up logging level VERBOSE=$(trueorfalse True $VERBOSE) +# root Access +# ----------- + +# OpenStack is designed to be run as a non-root user; Horizon will fail to run +# as **root** since Apache will not serve content from **root** user). +# ``stack.sh`` must not be run as **root**. It aborts and suggests one course of +# action to create a suitable user account. + +if [[ $EUID -eq 0 ]]; then + echo "You are running this script as root." + echo "Cut it out." + echo "Really." 
+ echo "If you need an account to run DevStack, do this (as root, heh) to create $STACK_USER:" + echo "$TOP_DIR/tools/create-stack-user.sh" + exit 1 +fi + +# We're not **root**, make sure ``sudo`` is available +is_package_installed sudo || install_package sudo + +# UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one +sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || + echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers + +# Set up devstack sudoers +TEMPFILE=`mktemp` +echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE +# Some binaries might be under /sbin or /usr/sbin, so make sure sudo will +# see them by forcing PATH +echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE +chmod 0440 $TEMPFILE +sudo chown root:root $TEMPFILE +sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh # Additional repos -# ================ +# ---------------- # Some distros need to add repos beyond the defaults provided by the vendor # to pick up required packages. @@ -196,45 +229,13 @@ if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then fi # ... and also optional to be enabled + is_package_installed yum-utils || install_package yum-utils sudo yum-config-manager --enable rhel-6-server-optional-rpms fi - -# root Access -# ----------- - -# OpenStack is designed to be run as a non-root user; Horizon will fail to run -# as **root** since Apache will not serve content from **root** user). -# ``stack.sh`` must not be run as **root**. It aborts and suggests one course of -# action to create a suitable user account. - -if [[ $EUID -eq 0 ]]; then - echo "You are running this script as root." - echo "Cut it out." - echo "Really." 
- echo "If you need an account to run DevStack, do this (as root, heh) to create $STACK_USER:" - echo "$TOP_DIR/tools/create-stack-user.sh" - exit 1 -fi - -# We're not **root**, make sure ``sudo`` is available -is_package_installed sudo || install_package sudo - -# UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one -sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || - echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers - -# Set up devstack sudoers -TEMPFILE=`mktemp` -echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE -# Some binaries might be under /sbin or /usr/sbin, so make sure sudo will -# see them by forcing PATH -echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE -chmod 0440 $TEMPFILE -sudo chown root:root $TEMPFILE -sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh - +# Filesystem setup +# ---------------- # Create the destination directory and ensure it is writable by the user # and read/executable by everybody for daemons (e.g. apache run for horizon) @@ -252,6 +253,15 @@ if [ -z "`grep ^127.0.0.1 /etc/hosts | grep $LOCAL_HOSTNAME`" ]; then sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts fi +# Destination path for service data +DATA_DIR=${DATA_DIR:-${DEST}/data} +sudo mkdir -p $DATA_DIR +safe_chown -R $STACK_USER $DATA_DIR + + +# Common Configuration +# -------------------- + # Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without # Internet access. ``stack.sh`` must have been previously run with Internet # access to install prerequisites and fetch repositories. 
@@ -265,15 +275,6 @@ ERROR_ON_CLONE=`trueorfalse False $ERROR_ON_CLONE` # Whether to enable the debug log level in OpenStack services ENABLE_DEBUG_LOG_LEVEL=`trueorfalse True $ENABLE_DEBUG_LOG_LEVEL` -# Destination path for service data -DATA_DIR=${DATA_DIR:-${DEST}/data} -sudo mkdir -p $DATA_DIR -safe_chown -R $STACK_USER $DATA_DIR - - -# Common Configuration -# ==================== - # Set fixed and floating range here so we can make sure not to use addresses # from either range when attempting to guess the IP to use for the host. # Note that setting FIXED_RANGE may be necessary when running DevStack From cb415697f37d3df2965f71b19c909a4c50f32eed Mon Sep 17 00:00:00 2001 From: Shashank Hegde Date: Thu, 27 Feb 2014 16:46:43 -0800 Subject: [PATCH 0522/4438] clean.sh removes all the files clean.sh was incorrectly looping over the list of files to remove. Because of this the files were not being removed. Change-Id: Ie0559e1d396a4d35df6a12dfbceefa7eb261bac5 Closes-Bug:1285924 --- clean.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clean.sh b/clean.sh index e121e4f703..3707d8411e 100755 --- a/clean.sh +++ b/clean.sh @@ -123,6 +123,6 @@ fi FILES_TO_CLEAN=".localrc.auto docs-files docs/ shocco/ stack-screenrc test*.conf* test.ini*" FILES_TO_CLEAN+=".stackenv .prereqs" -for file in FILES_TO_CLEAN; do +for file in $FILES_TO_CLEAN; do rm -f $TOP_DIR/$file done From d20f632a70565003ab8c72b2598201be79f4d782 Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Fri, 28 Feb 2014 09:22:37 +0900 Subject: [PATCH 0523/4438] Move some comments of variables to right place setup_develop*() in functions has been moved to functions-common. But some comments about the variables are still left. This commit moves it to the right place. 
Change-Id: Ic360454f1ee72f51c9979d0468dee0913e9b32e4 --- functions | 4 ---- functions-common | 3 +++ 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/functions b/functions index 3101111c63..407a9e708c 100644 --- a/functions +++ b/functions @@ -6,10 +6,6 @@ # - ``ENABLED_SERVICES`` # - ``FILES`` # - ``GLANCE_HOSTPORT`` -# - ``REQUIREMENTS_DIR`` -# - ``STACK_USER`` -# - ``TRACK_DEPENDS`` -# - ``UNDO_REQUIREMENTS`` # # Include the common functions diff --git a/functions-common b/functions-common index c93dd855b3..a485cae9d9 100644 --- a/functions-common +++ b/functions-common @@ -26,7 +26,10 @@ # - ``PIP_DOWNLOAD_CACHE`` # - ``PIP_USE_MIRRORS`` # - ``RECLONE`` +# - ``REQUIREMENTS_DIR`` +# - ``STACK_USER`` # - ``TRACK_DEPENDS`` +# - ``UNDO_REQUIREMENTS`` # - ``http_proxy``, ``https_proxy``, ``no_proxy`` # Save trace setting From 9bbecb7fc45538bc83d7db5e33a55505a691b44d Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Fri, 28 Feb 2014 11:19:28 -0500 Subject: [PATCH 0524/4438] Source lib/neutron in boot_from_volume.sh Without lib/neutron, boot_from_volume.sh generates the following error: + _ping_check_neutron private 10.11.12.5 30 /devstack/functions: line 356: _ping_check_neutron: command not found Change-Id: Ib72c3f24d614570d69bf5dda35cbaf5847b1d1b9 --- exercises/boot_from_volume.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 79120460b8..f679669eea 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -32,6 +32,7 @@ source $TOP_DIR/functions # Import project functions source $TOP_DIR/lib/cinder +source $TOP_DIR/lib/neutron # Import configuration source $TOP_DIR/openrc From 0e57b967e558fa843277d0119e50f0cb807929a2 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Fri, 28 Feb 2014 09:09:52 +0100 Subject: [PATCH 0525/4438] Devstack install can fail on missing xinetd.d/rsync config Assuming if the system does not have the xinetd.d/rsync, the dedicated 
service is the prefered way. Change-Id: Ic42651c5c3fb5bf0099786ca81a7bd06ace896a8 --- lib/swift | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index 59c1e54d8a..5d4d4ef506 100644 --- a/lib/swift +++ b/lib/swift @@ -301,7 +301,7 @@ function configure_swift { # rsyncd.conf just prepared for 4 nodes if is_ubuntu; then sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync - else + elif [ -e /etc/xinetd.d/rsync ]; then sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync fi @@ -635,8 +635,10 @@ function start_swift { # Start rsync if is_ubuntu; then sudo /etc/init.d/rsync restart || : + elif [ -e /etc/xinetd.d/rsync ]; then + start_service xinetd else - sudo systemctl start xinetd.service + start_service rsyncd fi if is_apache_enabled_service swift; then From 2e978dd6286a33af72796dc97cd81ed5fa2255de Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Fri, 28 Feb 2014 14:06:59 -0500 Subject: [PATCH 0526/4438] Add use_syslog to Marconi config This patch adds use_syslog option to the marconi config file. This is needed to allow marconi to run, when USE_SCREEN is set to False in devstack. Change-Id: I547697ec2745975e235a4e58cde81132ac37b70d --- lib/marconi | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/marconi b/lib/marconi index 8cfc55c1dd..29ae386d9f 100644 --- a/lib/marconi +++ b/lib/marconi @@ -95,6 +95,7 @@ function configure_marconi { sudo chown $USER $MARCONI_API_LOG_DIR iniset $MARCONI_CONF DEFAULT verbose True + iniset $MARCONI_CONF DEFAULT use_syslog $SYSLOG iniset $MARCONI_CONF 'drivers:transport:wsgi' bind $MARCONI_SERVICE_HOST iniset $MARCONI_CONF keystone_authtoken auth_protocol http From e994f5708d124ae71211876e9456499ac25646a3 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Fri, 28 Feb 2014 15:13:37 -0500 Subject: [PATCH 0527/4438] Remove bm_poseur, unmaintained and obsolete The bm_poseur git repository link has been broken for over 11 months. 
The virtualized/fake baremetal environment is not working and has not worked in a long time. Now, on the tail of enabling 'enable -o errexit', this functionality now has a hard break. Change-Id: I3cbd8db58c422bc5273d2433278aaa5e449ecfd9 Closes-Bug: 1285954 --- lib/baremetal | 44 ++++---------------------------------------- stack.sh | 3 --- stackrc | 6 ------ unstack.sh | 5 ----- 4 files changed, 4 insertions(+), 54 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index 473de0dd39..1d02e1e417 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -77,14 +77,6 @@ BM_POWER_MANAGER=${BM_POWER_MANAGER:-nova.virt.baremetal.fake.FakePowerManager} # These should be customized to your environment and hardware # ----------------------------------------------------------- -# whether to create a fake environment, eg. for devstack-gate -BM_USE_FAKE_ENV=`trueorfalse False $BM_USE_FAKE_ENV` - -# Extra options to pass to bm_poseur -# change the bridge name or IP: --bridge br99 --bridge-ip 192.0.2.1 -# change the virtualization type: --engine qemu -BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-} - # To provide PXE, configure nova-network's dnsmasq rather than run the one # dedicated to baremetal. 
When enable this, make sure these conditions are # fulfilled: @@ -97,15 +89,10 @@ BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-} BM_DNSMASQ_FROM_NOVA_NETWORK=`trueorfalse False $BM_DNSMASQ_FROM_NOVA_NETWORK` # BM_DNSMASQ_IFACE should match FLAT_NETWORK_BRIDGE -if [ "$BM_USE_FAKE_ENV" ]; then - BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-br99} - BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-192.0.2.32,192.0.2.48} -else - BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0} - # if testing on a physical network, - # BM_DNSMASQ_RANGE must be changed to suit your network - BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-} -fi +BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0} +# if testing on a physical network, +# BM_DNSMASQ_RANGE must be changed to suit your network +BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-} # BM_DNSMASQ_DNS provide dns server to bootstrap clients BM_DNSMASQ_DNS=${BM_DNSMASQ_DNS:-} @@ -143,7 +130,6 @@ BM_FLAVOR_ARCH=${BM_FLAVOR_ARCH:-$BM_CPU_ARCH} # Below this, we set some path and filenames. # Defaults are probably sufficient. BM_IMAGE_BUILD_DIR=${BM_IMAGE_BUILD_DIR:-$DEST/diskimage-builder} -BM_POSEUR_DIR=${BM_POSEUR_DIR:-$DEST/bm_poseur} # Use DIB to create deploy ramdisk and kernel. BM_BUILD_DEPLOY_RAMDISK=`trueorfalse True $BM_BUILD_DEPLOY_RAMDISK` @@ -177,7 +163,6 @@ function is_baremetal { # so that we can build the deployment kernel & ramdisk function prepare_baremetal_toolchain { git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH - git_clone $BM_POSEUR_REPO $BM_POSEUR_DIR $BM_POSEUR_BRANCH local shellinabox_basename=$(basename $BM_SHELL_IN_A_BOX) if [[ ! 
-e $DEST/$shellinabox_basename ]]; then @@ -196,27 +181,6 @@ function prepare_baremetal_toolchain { fi } -# set up virtualized environment for devstack-gate testing -function create_fake_baremetal_env { - local bm_poseur="$BM_POSEUR_DIR/bm_poseur" - # TODO(deva): add support for >1 VM - sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-bridge - sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-vm - BM_FIRST_MAC=$(sudo $bm_poseur get-macs) - - # NOTE: there is currently a limitation in baremetal driver - # that requires second MAC even if it is not used. - # Passing a fake value allows this to work. - # TODO(deva): remove this after driver issue is fixed. - BM_SECOND_MAC='12:34:56:78:90:12' -} - -function cleanup_fake_baremetal_env { - local bm_poseur="$BM_POSEUR_DIR/bm_poseur" - sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-vm - sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-bridge -} - # prepare various directories needed by baremetal hypervisor function configure_baremetal_nova_dirs { # ensure /tftpboot is prepared diff --git a/stack.sh b/stack.sh index 0ec0e0dc93..5152b2a430 100755 --- a/stack.sh +++ b/stack.sh @@ -1052,9 +1052,6 @@ if is_service_enabled nova && is_baremetal; then echo_summary "Preparing for nova baremetal" prepare_baremetal_toolchain configure_baremetal_nova_dirs - if [[ "$BM_USE_FAKE_ENV" = "True" ]]; then - create_fake_baremetal_env - fi fi diff --git a/stackrc b/stackrc index f235cccb15..6bb6f37195 100644 --- a/stackrc +++ b/stackrc @@ -229,12 +229,6 @@ TEMPEST_BRANCH=${TEMPEST_BRANCH:-master} BM_IMAGE_BUILD_REPO=${BM_IMAGE_BUILD_REPO:-${GIT_BASE}/openstack/diskimage-builder.git} BM_IMAGE_BUILD_BRANCH=${BM_IMAGE_BUILD_BRANCH:-master} -# bm_poseur -# Used to simulate a hardware environment for baremetal -# Only used if BM_USE_FAKE_ENV is set -BM_POSEUR_REPO=${BM_POSEUR_REPO:-${GIT_BASE}/tripleo/bm_poseur.git} -BM_POSEUR_BRANCH=${BM_POSEUR_BRANCH:-master} - # a websockets/html5 or flash powered VNC console for vm instances 
NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git} NOVNC_BRANCH=${NOVNC_BRANCH:-master} diff --git a/unstack.sh b/unstack.sh index 6351fe0549..a5e7b879f9 100755 --- a/unstack.sh +++ b/unstack.sh @@ -127,11 +127,6 @@ if is_service_enabled tls-proxy; then killall stud fi -# baremetal might have created a fake environment -if is_service_enabled baremetal && [[ "$BM_USE_FAKE_ENV" = "True" ]]; then - cleanup_fake_baremetal_env -fi - SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* # Get the iSCSI volumes From 8f084c6b855a747467274facb1218837e0f53c88 Mon Sep 17 00:00:00 2001 From: Nicolas Simonds Date: Fri, 28 Feb 2014 17:01:41 -0800 Subject: [PATCH 0528/4438] use "rabbit_hosts" config option instead of "rabbit_host" This allows for easy client configuration against clustered RabbitMQ setups. Does not break existing configs. Change-Id: I2b180f8860a727e35d7b465253689e5e8c44eb98 Closes-Bug: 1286411 --- lib/rpc_backend | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index a0424b1dee..e922daa078 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -186,7 +186,7 @@ function iniset_rpc_backend { fi elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_kombu - iniset $file $section rabbit_host $RABBIT_HOST + iniset $file $section rabbit_hosts $RABBIT_HOST iniset $file $section rabbit_password $RABBIT_PASSWORD fi } From 12cb2299e8e4d933c7181ef1a9b97478214d2200 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 28 Feb 2014 19:53:50 -0500 Subject: [PATCH 0529/4438] nova changes for multinode working under -o errexit There was a stray inicomment on paste outside of a nova-api block. This fails under -o errexit because the paste.ini doesn't exist. Move this to inside the correct block. 
Change-Id: Iffbdae6716a1c2a8f650b68edd4faf436434eab1 --- lib/nova | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index 90b1ba4fde..583a5923ce 100644 --- a/lib/nova +++ b/lib/nova @@ -245,10 +245,9 @@ function configure_nova { inicomment $NOVA_API_PASTE_INI filter:authtoken cafile inicomment $NOVA_API_PASTE_INI filter:authtoken admin_user inicomment $NOVA_API_PASTE_INI filter:authtoken admin_password + inicomment $NOVA_API_PASTE_INI filter:authtoken signing_dir fi - inicomment $NOVA_API_PASTE_INI filter:authtoken signing_dir - if is_service_enabled n-cpu; then # Force IP forwarding on, just on case sudo sysctl -w net.ipv4.ip_forward=1 From 7083b8224dab423392e21b069a1a6ef54cd14a8f Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 28 Feb 2014 20:16:20 -0500 Subject: [PATCH 0530/4438] make ceilometer work if you don't enable ceilometer-api when doing ceilometer in a multihost devstack, you don't want ceilometer-api running on the computes. Under -o errexit this became fatal. Change-Id: Ie43c8724ba467b810f5a3b075dea45d66dde8648 --- lib/ceilometer | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index d20d628247..0be4184a37 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -206,9 +206,12 @@ function start_ceilometer { screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF" screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" - echo "Waiting for ceilometer-api to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then - die $LINENO "ceilometer-api did not start" + # only die on API if it was actually intended to be turned on + if service_enabled ceilometer-api; then + echo "Waiting for ceilometer-api to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then + die $LINENO "ceilometer-api did not start" + fi fi screen_it ceilometer-alarm-notifier "cd ; ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" From c921a95f63b00c549763c9968a103d44df590032 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 28 Feb 2014 21:09:33 -0500 Subject: [PATCH 0531/4438] only do a dbsync if on the database node ceilometer should only try to reset the database if it's actually on a node where there is a database. Change-Id: Ibcfec0556829bff0938e3769c19d34ae6c02b738 --- lib/ceilometer | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 0be4184a37..2e6e7c5a76 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -180,9 +180,11 @@ function init_ceilometer { sudo chown $STACK_USER $CEILOMETER_AUTH_CACHE_DIR rm -f $CEILOMETER_AUTH_CACHE_DIR/* - if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then - recreate_database ceilometer utf8 - $CEILOMETER_BIN_DIR/ceilometer-dbsync + if is_service_enabled mysql postgresql; then + if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then + recreate_database ceilometer utf8 + $CEILOMETER_BIN_DIR/ceilometer-dbsync + fi fi } From a8880cc22c540e88c43da4e49fa6c976361484e4 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Sat, 1 Mar 2014 00:24:51 -0500 Subject: [PATCH 0532/4438] Use glance image-show to check for uploaded Docker images The behavior of the code being replaced was failing with '-o errexit' should that, as in the common case, the image has not been uploaded into Glance. While we could workaround this using a '|| :', the existing code also happened to overwrite the DOCKER_IMAGE global which is used elsewhere. It seemed prudent to either change this variable name or remove it altogether. 
Finally, using 'glance image-show' is more deterministic than grepping the output of 'glance image-list'. Change-Id: I23188155966ae9db64259b4a9d25a0d98c63c912 Closes-Bug: 1286443 --- lib/nova_plugins/hypervisor-docker | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index f8dc6afa19..cdbc4d172d 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -104,8 +104,7 @@ function start_nova_hypervisor { fi # Make sure we copied the image in Glance - DOCKER_IMAGE=$(glance image-list | egrep " $DOCKER_IMAGE_NAME ") - if ! is_set DOCKER_IMAGE ; then + if ! (glance image-show "$DOCKER_IMAGE"); then docker push $DOCKER_REPOSITORY_NAME fi } From 5a110d4e684d5cf936621608003f6b30eb75c2b1 Mon Sep 17 00:00:00 2001 From: fumihiko kakuma Date: Wed, 29 Jan 2014 14:42:06 +0900 Subject: [PATCH 0533/4438] Add configurations for the OpenFlow Agent mechanism driver This patch supports configurations for an environment of the OpenFlow Agent mechanism driver Set the following variables in a localrc to be ran this mechanism driver. 
Q_ML2_PLUGIN_MECHANISM_DRIVERS=ofagent Q_AGENT=ofagent Implements: blueprint ryu-ml2-driver Change-Id: I774da9a26f241487dfa4ec124b12f528704d860b --- lib/neutron_plugins/ofagent_agent | 94 +++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 lib/neutron_plugins/ofagent_agent diff --git a/lib/neutron_plugins/ofagent_agent b/lib/neutron_plugins/ofagent_agent new file mode 100644 index 0000000000..724df41d4c --- /dev/null +++ b/lib/neutron_plugins/ofagent_agent @@ -0,0 +1,94 @@ +# OpenFlow Agent plugin +# ---------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/neutron_plugins/ovs_base +source $TOP_DIR/lib/neutron_thirdparty/ryu # for RYU_DIR, install_ryu, etc + +function neutron_plugin_create_nova_conf { + _neutron_ovs_base_configure_nova_vif_driver +} + +function neutron_plugin_install_agent_packages { + _neutron_ovs_base_install_agent_packages + + # This agent uses ryu to talk with switches + install_package $(get_packages "ryu") + install_ryu + configure_ryu +} + +function neutron_plugin_configure_debug_command { + _neutron_ovs_base_configure_debug_command +} + +function neutron_plugin_configure_dhcp_agent { + iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport +} + +function neutron_plugin_configure_l3_agent { + _neutron_ovs_base_configure_l3_agent + iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport +} + +function neutron_plugin_configure_plugin_agent { + # Set up integration bridge + _neutron_ovs_base_setup_bridge $OVS_BRIDGE + _neutron_ovs_base_configure_firewall_driver + + # Check a supported openflow version + OF_VERSION=`ovs-ofctl --version | grep "OpenFlow versions" | awk '{print $3}' | cut -d':' -f2` + if [ `vercmp_numbers "$OF_VERSION" "0x3"` -lt "0" ]; then + die $LINENO "This agent requires OpenFlow 1.3+ capable switch." 
+ fi + + # Enable tunnel networks if selected + if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then + # Verify tunnels are supported + # REVISIT - also check kernel module support for GRE and patch ports + OVS_VERSION=`ovs-vsctl --version | head -n 1 | grep -E -o "[0-9]+\.[0-9]+"` + if [ `vercmp_numbers "$OVS_VERSION" "1.4"` -lt "0" ]; then + die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts." + fi + iniset /$Q_PLUGIN_CONF_FILE ovs enable_tunneling True + iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP + fi + + # Setup physical network bridge mappings. Override + # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more + # complex physical network configurations. + if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then + OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE + + # Configure bridge manually with physical interface as port for multi-node + sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE + fi + if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings $OVS_BRIDGE_MAPPINGS + fi + AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ofagent-agent" + + # Define extra "AGENT" configuration options when q-agt is configured by defining + # defining the array ``Q_AGENT_EXTRA_AGENT_OPTS``. 
+ # For Example: ``Q_AGENT_EXTRA_AGENT_OPTS=(foo=true bar=2)`` + for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + iniset /$Q_PLUGIN_CONF_FILE agent ${I/=/ } + done +} + +function neutron_plugin_setup_interface_driver { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver + iniset $conf_file DEFAULT ovs_use_veth True +} + +function neutron_plugin_check_adv_test_requirements { + is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 +} + +# Restore xtrace +$MY_XTRACE From 46c688c1ae2bdb0fc923635392a602efa3fd38c2 Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Mon, 24 Feb 2014 18:42:37 +0900 Subject: [PATCH 0534/4438] Fix pep8 errors This commit fixes pep8 errors. Change-Id: Ia1f1d61081a86b8a58251918392171cbc60f5ab8 --- tools/jenkins/jenkins_home/print_summary.py | 17 ++++++++++++-- tools/uec/meta.py | 25 ++++++++++++++++----- 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/tools/jenkins/jenkins_home/print_summary.py b/tools/jenkins/jenkins_home/print_summary.py index ea943e1caf..ee3790fcda 100755 --- a/tools/jenkins/jenkins_home/print_summary.py +++ b/tools/jenkins/jenkins_home/print_summary.py @@ -1,7 +1,20 @@ #!/usr/bin/python -import urllib + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ import json import sys +import urllib def print_usage(): @@ -42,4 +55,4 @@ def fetch_blob(url): 'logUrl': log_url, 'healthReport': config['healthReport']}) -print json.dumps(results) +print(json.dumps(results)) diff --git a/tools/uec/meta.py b/tools/uec/meta.py index 5b845d81a6..1d994a60d6 100644 --- a/tools/uec/meta.py +++ b/tools/uec/meta.py @@ -1,10 +1,23 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import BaseHTTPServer +import SimpleHTTPServer import sys -from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler -from SimpleHTTPServer import SimpleHTTPRequestHandler -def main(host, port, HandlerClass = SimpleHTTPRequestHandler, - ServerClass = HTTPServer, protocol="HTTP/1.0"): - """simple http server that listens on a give address:port""" + +def main(host, port, HandlerClass=SimpleHTTPServer.SimpleHTTPRequestHandler, + ServerClass=BaseHTTPServer.HTTPServer, protocol="HTTP/1.0"): + """simple http server that listens on a give address:port.""" server_address = (host, port) @@ -12,7 +25,7 @@ def main(host, port, HandlerClass = SimpleHTTPRequestHandler, httpd = ServerClass(server_address, HandlerClass) sa = httpd.socket.getsockname() - print "Serving HTTP on", sa[0], "port", sa[1], "..." 
+ print("Serving HTTP on", sa[0], "port", sa[1], "...") httpd.serve_forever() if __name__ == '__main__': From 9b3602ccf64f1d690a0a3d4adff987a5a12594b1 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Fri, 28 Feb 2014 13:52:29 -0500 Subject: [PATCH 0535/4438] Fix intermittent error in exercises/floating_ips.sh Every once in a while I see this error running floating_ips.sh: /devstack/exercises/floating_ips.sh:184:ping_check /devstack/functions:356:_ping_check_neutron /devstack/lib/neutron:904:die [ERROR] /devstack/lib/neutron:904 [Fail] Could ping server I think the problem is that it immediately tries to ping right after the icmp rule is deleted. Add a timeout and check so we at least wait one second. Change-Id: I753ec257fa12f6d2ddff1a5b1909e32d8995e173 --- exercises/floating_ips.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 8dc44effbc..8b7b96197e 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -178,6 +178,10 @@ fi nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \ die $LINENO "Failure deleting security group rule from $SECGROUP" +if ! 
timeout $ASSOCIATE_TIMEOUT sh -c "while nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then + die $LINENO "Security group rule not deleted from $SECGROUP" +fi + # FIXME (anthony): make xs support security groups if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then # Test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds From 729236ca1a38804b3c31ec39ef65592e0108f863 Mon Sep 17 00:00:00 2001 From: Mohammad Banikazemi Date: Wed, 5 Feb 2014 14:45:04 -0500 Subject: [PATCH 0536/4438] Adds support for IBM SDN-VE Neutron plugin This provides the support for the monolithic plugin for IBM SDN-VE that is being added to Neutron here: https://review.openstack.org/#/c/66453/ Implements: blueprint ibm-sdnve-plugin-support Depends-On: I92619a95bca2ae0c37e7fdd39da30119b43d1ad6 DocImpact Change-Id: I0958457355036fdab93156cd7fb4afd1a458918b --- lib/neutron_plugins/ibm | 133 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 133 insertions(+) create mode 100644 lib/neutron_plugins/ibm diff --git a/lib/neutron_plugins/ibm b/lib/neutron_plugins/ibm new file mode 100644 index 0000000000..22c8578e64 --- /dev/null +++ b/lib/neutron_plugins/ibm @@ -0,0 +1,133 @@ +# Neutron IBM SDN-VE plugin +# --------------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/neutron_plugins/ovs_base + +function neutron_plugin_install_agent_packages { + _neutron_ovs_base_install_agent_packages +} + +function _neutron_interface_setup { + # Setup one interface on the integration bridge if needed + # The plugin agent to be used if more than one interface is used + local bridge=$1 + local interface=$2 + sudo ovs-vsctl --no-wait -- --may-exist add-port $bridge $interface +} + +function neutron_setup_integration_bridge { + # Setup integration bridge if needed + if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then + neutron_ovs_base_cleanup + _neutron_ovs_base_setup_bridge 
$SDNVE_INTEGRATION_BRIDGE + if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then + interfaces=(${SDNVE_INTERFACE_MAPPINGS//[,:]/ }) + _neutron_interface_setup $SDNVE_INTEGRATION_BRIDGE ${interfaces[1]} + fi + fi + + # Set controller to SDNVE controller (1st of list) if exists + if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then + # Get the first controller + controllers=(${SDNVE_CONTROLLER_IPS//[\[,\]]/ }) + SDNVE_IP=${controllers[0]} + sudo ovs-vsctl set-controller $SDNVE_INTEGRATION_BRIDGE tcp:$SDNVE_IP + fi +} + +function neutron_plugin_create_nova_conf { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + # if n-cpu is enabled, then setup integration bridge + if is_service_enabled n-cpu; then + neutron_setup_integration_bridge + fi +} + +function is_neutron_ovs_base_plugin { + if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then + # Yes, we use OVS. + return 0 + else + # No, we do not use OVS. + return 1 + fi +} + +function neutron_plugin_configure_common { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ibm + Q_PLUGIN_CONF_FILENAME=sdnve_neutron_plugin.ini + Q_DB_NAME="sdnve_neutron" + Q_PLUGIN_CLASS="neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2" +} + +function neutron_plugin_configure_service { + # Define extra "SDNVE" configuration options when q-svc is configured + + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver + + if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE sdnve controller_ips $SDNVE_CONTROLLER_IPS + fi + + if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE sdnve integration_bridge $SDNVE_INTEGRATION_BRIDGE + fi + + if [[ "$SDNVE_RESET_BRIDGE" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE sdnve reset_bridge $SDNVE_RESET_BRIDGE + fi + + if [[ "$SDNVE_OUT_OF_BAND" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE sdnve out_of_band $SDNVE_OUT_OF_BAND + fi + + if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then + iniset 
/$Q_PLUGIN_CONF_FILE sdnve interface_mappings $SDNVE_INTERFACE_MAPPINGS + fi + + if [[ "$SDNVE_FAKE_CONTROLLER" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE sdnve use_fake_controller $SDNVE_FAKE_CONTROLLER + fi + + + iniset $NEUTRON_CONF DEFAULT notification_driver neutron.openstack.common.notifier.no_op_notifier + +} + +function neutron_plugin_configure_plugin_agent { + AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ibm-agent" +} + +function neutron_plugin_configure_debug_command { + : +} + +function neutron_plugin_setup_interface_driver { + return 0 +} + +function has_neutron_plugin_security_group { + # Does not support Security Groups + return 1 +} + +function neutron_ovs_base_cleanup { + if [[ "$SDNVE_RESET_BRIDGE" != False ]]; then + # remove all OVS ports that look like Neutron created ports + for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do + sudo ovs-vsctl del-port ${port} + done + + # remove integration bridge created by Neutron + for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${SDNVE_INTEGRATION_BRIDGE}); do + sudo ovs-vsctl del-br ${bridge} + done + fi +} + +# Restore xtrace +$MY_XTRACE From 91baef3e26994c64249453dd0b1d8998eda10eca Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 28 Feb 2014 11:11:45 -0600 Subject: [PATCH 0537/4438] Clarify deprecation of EXTRA_xxx_OPTS The various EXTRA_xxx_OPTS variables will be removed in the Juno development cycle, change the README to reflect the new way for the Neutron variables. Change-Id: Ic84da4a9b5a83e66cf0b57d643a87691f15517f0 --- README.md | 50 ++++++++++++++++++++++++++++++++++---------------- stack.sh | 48 +++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 79 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index 9914b1ed69..9304240f70 100644 --- a/README.md +++ b/README.md @@ -163,7 +163,7 @@ services are started in background and managed by `swift-init` tool. 
Basic Setup In order to enable Neutron a single node setup, you'll need the -following settings in your `localrc` section: +following settings in your `local.conf`: disable_service n-net enable_service q-svc @@ -172,7 +172,6 @@ following settings in your `localrc` section: enable_service q-l3 enable_service q-meta enable_service q-metering - enable_service neutron # Optional, to enable tempest configuration as part of DevStack enable_service tempest @@ -180,24 +179,44 @@ Then run `stack.sh` as normal. DevStack supports setting specific Neutron configuration flags to the service, Open vSwitch plugin and LinuxBridge plugin configuration files. -To make use of this feature, the following variables are defined and can -be configured in your `localrc` section: +To make use of this feature, the settings can be added to ``local.conf``. +The old ``Q_XXX_EXTRA_XXX_OPTS`` variables are deprecated and will be removed +in the near future. The ``local.conf`` headers for the replacements are: - Variable Name Config File Section Modified - ------------------------------------------------------------------------------------- - Q_SRV_EXTRA_OPTS Plugin `OVS` (for Open Vswitch) or `LINUX_BRIDGE` (for LinuxBridge) - Q_AGENT_EXTRA_AGENT_OPTS Plugin AGENT - Q_AGENT_EXTRA_SRV_OPTS Plugin `OVS` (for Open Vswitch) or `LINUX_BRIDGE` (for LinuxBridge) - Q_SRV_EXTRA_DEFAULT_OPTS Service DEFAULT +* ``Q_SRV_EXTRA_OPTS``: + + [[post-config|/$Q_PLUGIN_CONF_FILE]] + [linuxbridge] # or [ovs] + +* ``Q_AGENT_EXTRA_AGENT_OPTS``: + + [[post-config|/$Q_PLUGIN_CONF_FILE]] + [agent] + +* ``Q_AGENT_EXTRA_SRV_OPTS``: -An example of using the variables in your `localrc` section is below: + [[post-config|/$Q_PLUGIN_CONF_FILE]] + [linuxbridge] # or [ovs] + +* ``Q_SRV_EXTRA_DEFAULT_OPTS``: + + [[post-config|$NEUTRON_CONF]] + [DEFAULT] - Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_type=vxlan vxlan_udp_port=8472) - Q_SRV_EXTRA_OPTS=(tenant_network_type=vxlan) +Example extra config in `local.conf`: + + 
[[post-config|/$Q_PLUGIN_CONF_FILE]] + [agent] + tunnel_type=vxlan + vxlan_udp_port=8472 + + [[post-config|$NEUTRON_CONF]] + [DEFAULT] + tenant_network_type=vxlan DevStack also supports configuring the Neutron ML2 plugin. The ML2 plugin -can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. A -simple way to configure the ml2 plugin is shown below: +can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. This +is a simple way to configure the ml2 plugin: # VLAN configuration Q_PLUGIN=ml2 @@ -223,7 +242,6 @@ To change this, set the `Q_AGENT` variable to the agent you want to run Q_ML2_PLUGIN_GRE_TYPE_OPTIONS GRE TypeDriver options. Defaults to none. Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS VXLAN TypeDriver options. Defaults to none. Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS VLAN TypeDriver options. Defaults to none. - Q_AGENT_EXTRA_AGENT_OPTS Extra configuration options to pass to the OVS or LinuxBridge Agent. # Heat diff --git a/stack.sh b/stack.sh index ccd567e0bc..988fda5ff1 100755 --- a/stack.sh +++ b/stack.sh @@ -1359,12 +1359,14 @@ if [[ -n "$DEPRECATED_TEXT" ]]; then echo_summary "WARNING: $DEPRECATED_TEXT" fi +# TODO(dtroyer): Remove EXTRA_OPTS after stable/icehouse branch is cut # Specific warning for deprecated configs if [[ -n "$EXTRA_OPTS" ]]; then echo "" echo_summary "WARNING: EXTRA_OPTS is used" echo "You are using EXTRA_OPTS to pass configuration into nova.conf." echo "Please convert that configuration in localrc to a nova.conf section in local.conf:" + echo "EXTRA_OPTS will be removed early in the Juno development cycle" echo " [[post-config|\$NOVA_CONF]] [DEFAULT] @@ -1375,11 +1377,13 @@ if [[ -n "$EXTRA_OPTS" ]]; then done fi +# TODO(dtroyer): Remove EXTRA_BAREMETAL_OPTS after stable/icehouse branch is cut if [[ -n "$EXTRA_BAREMETAL_OPTS" ]]; then echo "" - echo_summary "WARNING: EXTRA_OPTS is used" - echo "You are using EXTRA_OPTS to pass configuration into nova.conf." 
+ echo_summary "WARNING: EXTRA_BAREMETAL_OPTS is used" + echo "You are using EXTRA_BAREMETAL_OPTS to pass configuration into nova.conf." echo "Please convert that configuration in localrc to a nova.conf section in local.conf:" + echo "EXTRA_BAREMETAL_OPTS will be removed early in the Juno development cycle" echo " [[post-config|\$NOVA_CONF]] [baremetal] @@ -1390,13 +1394,49 @@ if [[ -n "$EXTRA_BAREMETAL_OPTS" ]]; then done fi +# TODO(dtroyer): Remove Q_AGENT_EXTRA_AGENT_OPTS after stable/juno branch is cut +if [[ -n "$Q_AGENT_EXTRA_AGENT_OPTS" ]]; then + echo "" + echo_summary "WARNING: Q_AGENT_EXTRA_AGENT_OPTS is used" + echo "You are using Q_AGENT_EXTRA_AGENT_OPTS to pass configuration into $NEUTRON_CONF." + echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:" + echo "Q_AGENT_EXTRA_AGENT_OPTS will be removed early in the 'K' development cycle" + echo " +[[post-config|/\$Q_PLUGIN_CONF_FILE]] +[DEFAULT] +" + for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + echo ${I} + done +fi + +# TODO(dtroyer): Remove Q_AGENT_EXTRA_SRV_OPTS after stable/juno branch is cut +if [[ -n "$Q_AGENT_EXTRA_SRV_OPTS" ]]; then + echo "" + echo_summary "WARNING: Q_AGENT_EXTRA_SRV_OPTS is used" + echo "You are using Q_AGENT_EXTRA_SRV_OPTS to pass configuration into $NEUTRON_CONF." 
+ echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:" + echo "Q_AGENT_EXTRA_AGENT_OPTS will be removed early in the 'K' development cycle" + echo " +[[post-config|/\$Q_PLUGIN_CONF_FILE]] +[DEFAULT] +" + for I in "${Q_AGENT_EXTRA_SRV_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + echo ${I} + done +fi + +# TODO(dtroyer): Remove Q_DHCP_EXTRA_DEFAULT_OPTS after stable/icehouse branch is cut if [[ -n "$Q_DHCP_EXTRA_DEFAULT_OPTS" ]]; then echo "" echo_summary "WARNING: Q_DHCP_EXTRA_DEFAULT_OPTS is used" echo "You are using Q_DHCP_EXTRA_DEFAULT_OPTS to pass configuration into $Q_DHCP_CONF_FILE." echo "Please convert that configuration in localrc to a $Q_DHCP_CONF_FILE section in local.conf:" + echo "Q_DHCP_EXTRA_DEFAULT_OPTS will be removed early in the Juno development cycle" echo " -[[post-config|\$Q_DHCP_CONF_FILE]] +[[post-config|/\$Q_DHCP_CONF_FILE]] [DEFAULT] " for I in "${Q_DHCP_EXTRA_DEFAULT_OPTS[@]}"; do @@ -1405,11 +1445,13 @@ if [[ -n "$Q_DHCP_EXTRA_DEFAULT_OPTS" ]]; then done fi +# TODO(dtroyer): Remove Q_SRV_EXTRA_DEFAULT_OPTS after stable/icehouse branch is cut if [[ -n "$Q_SRV_EXTRA_DEFAULT_OPTS" ]]; then echo "" echo_summary "WARNING: Q_SRV_EXTRA_DEFAULT_OPTS is used" echo "You are using Q_SRV_EXTRA_DEFAULT_OPTS to pass configuration into $NEUTRON_CONF." echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:" + echo "Q_SRV_EXTRA_DEFAULT_OPTS will be removed early in the Juno development cycle" echo " [[post-config|\$NEUTRON_CONF]] [DEFAULT] From 57d478d87438912e1a33d4a2d00d4a300148e2fc Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Fri, 28 Feb 2014 16:37:43 +0000 Subject: [PATCH 0538/4438] Move heat keystone setup into lib/heat Move the heat setup which currently happens in files/keystone_data.sh to lib/heat, where we have create_heat_accounts. 
Move the user, role, service and endpoint creation as that is consistent with what other services, e.g lib/nova are doing. Change-Id: Iaa2c822cad581d6b2b4f22f8863daf81e25f8485 --- files/keystone_data.sh | 35 ---------------------------------- lib/heat | 43 +++++++++++++++++++++++++++++++++++++++++- stack.sh | 3 +-- 3 files changed, 43 insertions(+), 38 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 9a34c7616f..fc1e8136a4 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -53,41 +53,6 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" | --role ResellerAdmin fi -# Heat -if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then - keystone user-create --name=heat \ - --pass="$SERVICE_PASSWORD" \ - --tenant $SERVICE_TENANT_NAME \ - --email=heat@example.com - keystone user-role-add --tenant $SERVICE_TENANT_NAME \ - --user heat \ - --role service - # heat_stack_user role is for users created by Heat - keystone role-create --name heat_stack_user - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - keystone service-create \ - --name=heat-cfn \ - --type=cloudformation \ - --description="Heat CloudFormation Service" - keystone endpoint-create \ - --region RegionOne \ - --service heat-cfn \ - --publicurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \ - --adminurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \ - --internalurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" - keystone service-create \ - --name=heat \ - --type=orchestration \ - --description="Heat Service" - keystone endpoint-create \ - --region RegionOne \ - --service heat \ - --publicurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ - --internalurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" - fi -fi - # Glance if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then keystone user-create \ diff --git a/lib/heat b/lib/heat index d0c0302016..42d1057cbd 100644 --- 
a/lib/heat +++ b/lib/heat @@ -197,8 +197,49 @@ function disk_image_create { } # create_heat_accounts() - Set up common required heat accounts -# Note this is in addition to what is in files/keystone_data.sh function create_heat_accounts { + # migrated from files/keystone_data.sh + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") + + HEAT_USER=$(openstack user create \ + heat \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email heat@example.com \ + | grep " id " | get_field 2) + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $HEAT_USER + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + HEAT_SERVICE=$(openstack service create \ + heat \ + --type=orchestration \ + --description="Heat Orchestration Service" \ + | grep " id " | get_field 2) + openstack endpoint create \ + $HEAT_SERVICE \ + --region RegionOne \ + --publicurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ + --adminurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ + --internalurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" + HEAT_CFN_SERVICE=$(openstack service create \ + heat \ + --type=cloudformation \ + --description="Heat CloudFormation Service" \ + | grep " id " | get_field 2) + openstack endpoint create \ + $HEAT_CFN_SERVICE \ + --region RegionOne \ + --publicurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \ + --adminurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \ + --internalurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" + fi + + # heat_stack_user role is for users created by Heat + openstack role create heat_stack_user + # Note we have to pass token/endpoint here because the current endpoint and # version negotiation in OSC means just --os-identity-api-version=3 won't work 
KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" diff --git a/stack.sh b/stack.sh index ccd567e0bc..ec8de2d2dd 100755 --- a/stack.sh +++ b/stack.sh @@ -934,8 +934,7 @@ if is_service_enabled key; then ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \ - DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_CFN_PORT=$HEAT_API_CFN_PORT \ - HEAT_API_PORT=$HEAT_API_PORT \ + DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \ bash -x $FILES/keystone_data.sh # Set up auth creds now that keystone is bootstrapped From 2ca3bf18dd756621f012ebb7ffb338f2fa38d6f2 Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Mon, 3 Mar 2014 18:07:33 +0000 Subject: [PATCH 0539/4438] Add heat_stack_owner role for heat trusts usage Heat supports deferred operations via keystone trusts, and we'd like to make that the default. To do this, we require a new role, which is the default role specified in heat.conf trusts_delegated_roles, heat_stack_owner. Add the role to the admin/demo users so they can create heat stacks when we make deferred_auth_method=trusts the default. 
Change-Id: Idfc70ee89428c23f5965e643486ff2ad9566471c Related-Bug: #1286157 --- lib/heat | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/lib/heat b/lib/heat index 42d1057cbd..2d9d863f0c 100644 --- a/lib/heat +++ b/lib/heat @@ -240,6 +240,19 @@ function create_heat_accounts { # heat_stack_user role is for users created by Heat openstack role create heat_stack_user + # heat_stack_owner role is given to users who create Heat stacks, + # it's the default role used by heat to delegate to the heat service + # user (for performing deferred operations via trusts), see heat.conf + HEAT_OWNER_ROLE=$(openstack role create \ + heat_stack_owner \ + | grep " id " | get_field 2) + + # Give the role to the demo and admin users so they can create stacks + # in either of the projects created by devstack + openstack role add $HEAT_OWNER_ROLE --project demo --user demo + openstack role add $HEAT_OWNER_ROLE --project demo --user admin + openstack role add $HEAT_OWNER_ROLE --project admin --user admin + # Note we have to pass token/endpoint here because the current endpoint and # version negotiation in OSC means just --os-identity-api-version=3 won't work KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" From de3b82037d863b55cc245c343a8697b5cf4b1904 Mon Sep 17 00:00:00 2001 From: Shweta P Date: Mon, 3 Mar 2014 13:38:37 -0500 Subject: [PATCH 0540/4438] NCCLIENT_REPO is using the wrong url NCCLIENT_REPO value in lib/neutron_plugins/cisco is pointing to a repo that does not exist. This fix corrects the url. 
Closes-Bug #1286302 Change-Id: I42db0b3f7a4bbf5d1d053e3da8b4fbb67d47de94 --- lib/neutron_plugins/cisco | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco index 7728eb177f..a1b089e1a3 100644 --- a/lib/neutron_plugins/cisco +++ b/lib/neutron_plugins/cisco @@ -23,7 +23,7 @@ Q_CISCO_PLUGIN_VLAN_RANGES=${Q_CISCO_PLUGIN_VLAN_RANGES:-vlan:1:4094} # Specify ncclient package information NCCLIENT_DIR=$DEST/ncclient NCCLIENT_VERSION=${NCCLIENT_VERSION:-0.3.1} -NCCLIENT_REPO=${NCCLIENT_REPO:-${GIT_BASE}/CiscoSystems/ncclient.git} +NCCLIENT_REPO=${NCCLIENT_REPO:-git://github.com/CiscoSystems/ncclient.git} NCCLIENT_BRANCH=${NCCLIENT_BRANCH:-master} # This routine put a prefix on an existing function name From 753afeba7464464a3fd050eb2085e51580f9b5a7 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Thu, 13 Feb 2014 17:17:30 -0800 Subject: [PATCH 0541/4438] Use neutron security groups in BigSwitch plugin Configures the Big Switch third-party plugin to use neutron security groups instead of nova security groups. 
Change-Id: I6bc3046ff0e70b8288a7c3f3d6f975376adc081a Implements: blueprint bigswitch-neutron-security --- lib/neutron_plugins/bigswitch_floodlight | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index 4cb0da84ea..b1b77d7606 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -38,7 +38,12 @@ function neutron_plugin_configure_l3_agent { } function neutron_plugin_configure_plugin_agent { - : + # Set up integration bridge + _neutron_ovs_base_setup_bridge $OVS_BRIDGE + iniset /$Q_PLUGIN_CONF_FILE restproxyagent integration_bridge $OVS_BRIDGE + AGENT_BINARY="$NEUTRON_DIR/neutron/plugins/bigswitch/agent/restproxy_agent.py" + + _neutron_ovs_base_configure_firewall_driver } function neutron_plugin_configure_service { @@ -61,7 +66,7 @@ function neutron_plugin_setup_interface_driver { function has_neutron_plugin_security_group { # 1 means False here - return 1 + return 0 } function neutron_plugin_check_adv_test_requirements { From 8829acaf141ade6d5ac61ec3d0b15d80e3a09752 Mon Sep 17 00:00:00 2001 From: zhang-jinnan Date: Mon, 3 Mar 2014 10:55:33 +0800 Subject: [PATCH 0542/4438] Remove blank space after print Keep code clean and pleasure:) Change-Id: Ie0c0781eaeb57b32a9a6185a59353fc4b911afd6 --- tools/jenkins/jenkins_home/print_summary.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/jenkins/jenkins_home/print_summary.py b/tools/jenkins/jenkins_home/print_summary.py index ea943e1caf..6310b1889f 100755 --- a/tools/jenkins/jenkins_home/print_summary.py +++ b/tools/jenkins/jenkins_home/print_summary.py @@ -5,8 +5,8 @@ def print_usage(): - print ("Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]" - % sys.argv[0]) + print("Usage: %s [jenkins_url (eg. 
http://50.56.12.202:8080/)]" + % sys.argv[0]) sys.exit() From ccf60f75a2a5a0f10412b4f806ac7a123068909b Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Mon, 3 Mar 2014 22:48:31 -0500 Subject: [PATCH 0543/4438] Put tempest ipv6 option in the correct group This commit updates the location for the ipv6 option to be in the proper group. This depends on tempest change I35769cf4d18363fad56ed5150b4d01d8a5ad17e7 Change-Id: Ief5ea00649c8954282245e30c63c45557a28ea9f --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 16f8744d85..1639ae60b4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -293,7 +293,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG network public_network_id "$public_network_id" iniset $TEMPEST_CONFIG network public_router_id "$public_router_id" iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE" - iniset $TEMPEST_CONFIG network ipv6_enabled "$IPV6_ENABLED" + iniset $TEMPEST_CONFIG network-feature-enabled ipv6 "$IPV6_ENABLED" # boto iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" From 314af0a7a97b31ff2a803a77e1a92f5b67857f18 Mon Sep 17 00:00:00 2001 From: Sreeram Yerrapragada Date: Mon, 3 Mar 2014 21:34:45 -0800 Subject: [PATCH 0544/4438] Fix upload function for vmdk files Fix all grep statements failing under -o errexit. Change-Id: I0591a2ba7351d598eb5b29d68a83ce6290600938 --- functions | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/functions b/functions index a844b1c1af..ab8319b0ce 100644 --- a/functions +++ b/functions @@ -55,7 +55,7 @@ function upload_image { mkdir -p $FILES/images IMAGE_FNAME=`basename "$image_url"` if [[ $image_url != file* ]]; then - # Downloads the image (uec ami+aki style), then extracts it. + # Downloads the image (uec ami+akistyle), then extracts it. if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then wget -c $image_url -O $FILES/$IMAGE_FNAME if [[ $? 
-ne 0 ]]; then @@ -103,12 +103,12 @@ function upload_image { vmdk_net_adapter="" # vmdk adapter type - vmdk_adapter_type="$(head -25 $IMAGE | grep -a -F -m 1 'ddb.adapterType =' $IMAGE)" + vmdk_adapter_type="$(head -25 $IMAGE | { grep -a -F -m 1 'ddb.adapterType =' $IMAGE || true; })" vmdk_adapter_type="${vmdk_adapter_type#*\"}" vmdk_adapter_type="${vmdk_adapter_type%?}" # vmdk disk type - vmdk_create_type="$(head -25 $IMAGE | grep -a -F -m 1 'createType=' $IMAGE)" + vmdk_create_type="$(head -25 $IMAGE | { grep -a -F -m 1 'createType=' $IMAGE || true; })" vmdk_create_type="${vmdk_create_type#*\"}" vmdk_create_type="${vmdk_create_type%\"*}" @@ -119,7 +119,7 @@ function upload_image { elif [[ "$vmdk_create_type" = "monolithicFlat" || \ "$vmdk_create_type" = "vmfs" ]]; then # Attempt to retrieve the *-flat.vmdk - flat_fname="$(head -25 $IMAGE | grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE)" + flat_fname="$(head -25 $IMAGE | { grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE || true; })" flat_fname="${flat_fname#*\"}" flat_fname="${flat_fname%?}" if [[ -z "$flat_name" ]]; then @@ -190,7 +190,7 @@ function upload_image { fi if $descriptor_found; then vmdk_adapter_type="$(head -25 $descriptor_url |"` - `"grep -a -F -m 1 'ddb.adapterType =' $descriptor_url)" + `" { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })" vmdk_adapter_type="${vmdk_adapter_type#*\"}" vmdk_adapter_type="${vmdk_adapter_type%?}" fi @@ -203,7 +203,7 @@ function upload_image { # NOTE: For backwards compatibility reasons, colons may be used in place # of semi-colons for property delimiters but they are not permitted # characters in NTFS filesystems. 
- property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$'` + property_string=`echo "$IMAGE_NAME" | { grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$' || true; }` IFS=':;' read -a props <<< "$property_string" vmdk_disktype="${props[0]:-$vmdk_disktype}" vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}" From a439faa85b89b0d2c73085743426fd8741293cb6 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Mon, 24 Feb 2014 20:32:19 +0900 Subject: [PATCH 0545/4438] Update required packages for ryu Sync with the recent reality. Change-Id: I4c37d09e511f3763d2267267815387bd5c825e0e Closes-Bug: 1287541 --- files/apts/ryu | 4 +--- files/rpms-suse/ryu | 4 +--- files/rpms/ryu | 4 +--- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/files/apts/ryu b/files/apts/ryu index e8ed926c1e..9b850807e6 100644 --- a/files/apts/ryu +++ b/files/apts/ryu @@ -1,4 +1,2 @@ -python-gevent -python-gflags -python-netifaces +python-eventlet python-sphinx diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu index 3797b6cb44..6b426fb163 100644 --- a/files/rpms-suse/ryu +++ b/files/rpms-suse/ryu @@ -1,4 +1,2 @@ python-Sphinx -python-gevent -python-netifaces -python-python-gflags +python-eventlet diff --git a/files/rpms/ryu b/files/rpms/ryu index e8ed926c1e..9b850807e6 100644 --- a/files/rpms/ryu +++ b/files/rpms/ryu @@ -1,4 +1,2 @@ -python-gevent -python-gflags -python-netifaces +python-eventlet python-sphinx From 0e598c3c81fc3d652415095101a095de69ec8a6d Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Mon, 24 Feb 2014 22:02:08 +0900 Subject: [PATCH 0546/4438] Stop running setup_devel for Ryu It doesn't work here for various reasons. - Ryu's setup.py is incompatible with global requirements - This code is called before install_infra. Ryu is not a part of OpenStack anyway. 
Closes-Bug: 1287569 Change-Id: I01a942411f7d06bdf8f1fec5d1a0bc319560f329 --- lib/neutron_thirdparty/ryu | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu index 424a90041e..b2c1b613fe 100644 --- a/lib/neutron_thirdparty/ryu +++ b/lib/neutron_thirdparty/ryu @@ -18,14 +18,8 @@ RYU_OFP_PORT=${RYU_OFP_PORT:-6633} # Ryu Applications RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest} -# configure_ryu can be called multiple times as neutron_pluing/ryu may call -# this function for neutron-ryu-agent -_RYU_CONFIGURED=${_RYU_CONFIGURED:-False} function configure_ryu { - if [[ "$_RYU_CONFIGURED" == "False" ]]; then - setup_develop $RYU_DIR - _RYU_CONFIGURED=True - fi + : } function init_ryu { @@ -63,6 +57,7 @@ _RYU_INSTALLED=${_RYU_INSTALLED:-False} function install_ryu { if [[ "$_RYU_INSTALLED" == "False" ]]; then git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH + export PYTHONPATH=$RYU_DIR:$PYTHONPATH _RYU_INSTALLED=True fi } From d5b52ca7557ec1aef71f21c71110455a6aea2505 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 4 Mar 2014 09:23:07 -0500 Subject: [PATCH 0547/4438] fix tgt to use 'service' instead of upstart calls the comments in here were largely about oneric, which we don't support any more. service is installed in a precise environment, and will support debian and the upcoming transition to systemd better, so use that instead. 
Change-Id: If15493549a8c93a7387df9b3bba31443aed46995 --- lib/cinder | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/cinder b/lib/cinder index d003f5dc7b..dd2956a5b4 100644 --- a/lib/cinder +++ b/lib/cinder @@ -491,10 +491,7 @@ function start_cinder { sudo rm -f /etc/tgt/conf.d/stack.conf _configure_tgt_for_config_d if is_ubuntu; then - # tgt in oneiric doesn't restart properly if tgtd isn't running - # do it in two steps - sudo stop tgt || true - sudo start tgt + sudo service tgt restart elif is_fedora; then if [[ $DISTRO =~ (rhel6) ]]; then sudo /sbin/service tgtd restart From a67cb1af4df6b5c758c319e0590a3188d951e68d Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Tue, 4 Mar 2014 18:38:33 +0400 Subject: [PATCH 0548/4438] Fix typo in ironic configure function IRONIC_CONF should be replaced by IRONIC_CONF_FILE Change-Id: Ie43e376f42f14c46d21df7dbb19db923521f438b --- lib/ironic | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ironic b/lib/ironic index 4e5edc90cf..b346de1e69 100644 --- a/lib/ironic +++ b/lib/ironic @@ -124,7 +124,7 @@ function configure_ironic_conductor { cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR - iniset $IRONIC_CONF DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF + iniset $IRONIC_CONF_FILE DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF } # create_ironic_cache_dir() - Part of the init_ironic() process From 3d2bdf50bc0110c718de39606c8b803696a31285 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Sat, 1 Mar 2014 00:17:32 -0500 Subject: [PATCH 0549/4438] Use cat instead of read Date: Tue, 4 Mar 2014 15:02:04 -0500 Subject: [PATCH 0550/4438] fix typo in lib/ceilometer this should be is_service_enabled and not service_enabled. Not sure why it passes in the gate, but it fails in stackforge jobs. 
Change-Id: I876f72cd98ff9c8e4ea28832bc9ac6bbdc3b865d --- lib/ceilometer | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ceilometer b/lib/ceilometer index 2e6e7c5a76..04c1a34b8b 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -209,7 +209,7 @@ function start_ceilometer { screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" # only die on API if it was actually intended to be turned on - if service_enabled ceilometer-api; then + if is_service_enabled ceilometer-api; then echo "Waiting for ceilometer-api to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then die $LINENO "ceilometer-api did not start" From e2aa91b237e7e23f70847cba60a54a40560a5a3c Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Tue, 4 Mar 2014 04:40:19 -0500 Subject: [PATCH 0551/4438] Enable marconi-server to run when USE_SCREEN=false This patch, 1. adds log_file option to marconi.conf 2. redirects the output from marconi-server, in the same precedent set by another project. 
Change-Id: Ib273a03625d5a4edf8bb3ed7d522d2b087975acd --- lib/marconi | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/marconi b/lib/marconi index 29ae386d9f..a96137fc04 100644 --- a/lib/marconi +++ b/lib/marconi @@ -34,7 +34,8 @@ MARCONI_DIR=$DEST/marconi MARCONICLIENT_DIR=$DEST/python-marconiclient MARCONI_CONF_DIR=/etc/marconi MARCONI_CONF=$MARCONI_CONF_DIR/marconi.conf -MARCONI_API_LOG_DIR=/var/log/marconi-api +MARCONI_API_LOG_DIR=/var/log/marconi +MARCONI_API_LOG_FILE=$MARCONI_API_LOG_DIR/queues.log MARCONI_AUTH_CACHE_DIR=${MARCONI_AUTH_CACHE_DIR:-/var/cache/marconi} # Support potential entry-points console scripts @@ -96,6 +97,7 @@ function configure_marconi { iniset $MARCONI_CONF DEFAULT verbose True iniset $MARCONI_CONF DEFAULT use_syslog $SYSLOG + iniset $MARCONI_CONF DEFAULT log_file $MARCONI_API_LOG_FILE iniset $MARCONI_CONF 'drivers:transport:wsgi' bind $MARCONI_SERVICE_HOST iniset $MARCONI_CONF keystone_authtoken auth_protocol http @@ -148,7 +150,7 @@ function install_marconiclient { # start_marconi() - Start running processes, including screen function start_marconi { - screen_it marconi-server "marconi-server --config-file $MARCONI_CONF" + screen_it marconi-server "marconi-server --config-file $MARCONI_CONF 2>&1" echo "Waiting for Marconi to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then die $LINENO "Marconi did not start" From ae1728917373986b68d2b0abe2e7052fb78e5903 Mon Sep 17 00:00:00 2001 From: ronak Date: Tue, 4 Mar 2014 15:48:22 -0800 Subject: [PATCH 0552/4438] Supporting Nuage Networks' Plugin through devstack Nuage networks' plugin specific configuration setting file for devstack Change-Id: I936f87b8fbc6f90130514b2fc0d111eab861da7c Implements: blueprint nuage-networks-plugin --- lib/neutron_plugins/nuage | 69 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 lib/neutron_plugins/nuage diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage new file mode 100644 index 0000000000..3649f39bfd --- /dev/null +++ b/lib/neutron_plugins/nuage @@ -0,0 +1,69 @@ +# Nuage Neutron Plugin +# ---------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +function neutron_plugin_create_nova_conf { + NOVA_OVS_BRIDGE=${NOVA_OVS_BRIDGE:-"br-int"} + iniset $NOVA_CONF DEFAULT neutron_ovs_bridge $NOVA_OVS_BRIDGE + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver + iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER +} + +function neutron_plugin_install_agent_packages { + : +} + +function neutron_plugin_configure_common { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nuage + Q_PLUGIN_CONF_FILENAME=nuage_plugin.ini + Q_DB_NAME="nuage_neutron" + Q_PLUGIN_CLASS="neutron.plugins.nuage.plugin.NuagePlugin" + Q_PLUGIN_EXTENSIONS_PATH=neutron/plugins/nuage/extensions + #Nuage specific Neutron defaults. 
Actual value must be set and sourced + NUAGE_CNA_SERVERS=${NUAGE_CNA_SERVERS:-'localhost:8443'} + NUAGE_CNA_SERVER_AUTH=${NUAGE_CNA_SERVER_AUTH:-'username:password'} + NUAGE_CNA_ORGANIZATION=${NUAGE_CNA_ORGANIZATION:-'org'} + NUAGE_CNA_SERVER_SSL=${NUAGE_CNA_SERVER_SSL:-'True'} + NUAGE_CNA_BASE_URI=${NUAGE_CNA_BASE_URI:-'/'} + NUAGE_CNA_AUTH_RESOURCE=${NUAGE_CNA_AUTH_RESOURCE:-'/'} + NUAGE_CNA_DEF_NETPART_NAME=${NUAGE_CNA_DEF_NETPART_NAME:-''} +} + +function neutron_plugin_configure_debug_command { + : +} + +function neutron_plugin_configure_dhcp_agent { + : +} + +function neutron_plugin_configure_l3_agent { + : +} + +function neutron_plugin_configure_plugin_agent { + : +} + +function neutron_plugin_configure_service { + iniset $NEUTRON_CONF DEFAULT api_extensions_path neutron/plugins/nuage/extensions/ + iniset /$Q_PLUGIN_CONF_FILE restproxy base_uri $NUAGE_CNA_BASE_URI + iniset /$Q_PLUGIN_CONF_FILE restproxy serverssl $NUAGE_CNA_SERVER_SSL + iniset /$Q_PLUGIN_CONF_FILE restproxy serverauth $NUAGE_CNA_SERVER_AUTH + iniset /$Q_PLUGIN_CONF_FILE restproxy organization $NUAGE_CNA_ORGANIZATION + iniset /$Q_PLUGIN_CONF_FILE restproxy server $NUAGE_CNA_SERVERS + iniset /$Q_PLUGIN_CONF_FILE restproxy auth_resource $NUAGE_CNA_AUTH_RESOURCE + iniset /$Q_PLUGIN_CONF_FILE restproxy default_net_partition_name $NUAGE_CNA_DEF_NETPART_NAME +} + +function has_neutron_plugin_security_group { + # 1 means False here + return 1 +} + +# Restore xtrace +$MY_XTRACE From 8068455a023063b615fc66ee038211a9ae300a81 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 5 Mar 2014 11:50:23 -0600 Subject: [PATCH 0553/4438] Close all logging file descriptors This has lingered for a long time, finally do something about it... 
Change-Id: Ib90408187698d5d4c23ffb0e527011446efc3c7e --- stack.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/stack.sh b/stack.sh index ab1e8fe94d..32dac0f443 100755 --- a/stack.sh +++ b/stack.sh @@ -1419,3 +1419,9 @@ fi # Indicate how long this took to run (bash maintained variable ``SECONDS``) echo_summary "stack.sh completed in $SECONDS seconds." + +# Restore/close logging file descriptors +exec 1>&3 +exec 2>&3 +exec 3>&- +exec 6>&- From 961328fc4622b16135d6d580429dc3e5db01ded5 Mon Sep 17 00:00:00 2001 From: Flavio Percoco Date: Wed, 5 Mar 2014 18:45:56 +0100 Subject: [PATCH 0554/4438] Fix marconi's storage setting for MongoDB The storage driver should be set to mongodb and the driver's uri to the mongodb:// uri. Change-Id: I6193a5d78f6cd7283b4e3b1831978883b9e99b06 --- lib/marconi | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index 29ae386d9f..8f4f3c6bbc 100644 --- a/lib/marconi +++ b/lib/marconi @@ -105,7 +105,8 @@ function configure_marconi { iniset $MARCONI_CONF keystone_authtoken signing_dir $MARCONI_AUTH_CACHE_DIR if [[ "$MARCONI_BACKEND" = 'mongodb' ]]; then - iniset $MARCONI_CONF database connection mongodb://localhost:27017/marconi + iniset $MARCONI_CONF drivers storage mongodb + iniset $MARCONI_CONF 'drivers:storage:mongodb' uri mongodb://localhost:27017/marconi configure_mongodb cleanup_marconi fi From 5fc5b7e231710c2d67522d1bcabdc448dadd0f94 Mon Sep 17 00:00:00 2001 From: Flavio Percoco Date: Wed, 5 Mar 2014 18:49:02 +0100 Subject: [PATCH 0555/4438] Add support for sqlalchemy to Marconi This patch adds a way to setup a marconi instance using sqlalchemy. 
Change-Id: Ia694b76286835ca2ca935814370aa43544fe84fa --- lib/marconi | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index 8f4f3c6bbc..1e0cc7df08 100644 --- a/lib/marconi +++ b/lib/marconi @@ -104,7 +104,10 @@ function configure_marconi { iniset $MARCONI_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $MARCONI_CONF keystone_authtoken signing_dir $MARCONI_AUTH_CACHE_DIR - if [[ "$MARCONI_BACKEND" = 'mongodb' ]]; then + if [ "$MARCONI_BACKEND" = 'mysql' ] || [ "$MARCONI_BACKEND" = 'postgresql' ] ; then + iniset $MARCONI_CONF drivers storage sqlalchemy + iniset $MARCONI_CONF 'drivers:storage:sqlalchemy' uri `database_connection_url marconi` + else iniset $MARCONI_CONF drivers storage mongodb iniset $MARCONI_CONF 'drivers:storage:mongodb' uri mongodb://localhost:27017/marconi configure_mongodb From d46d9dd8de00d07eee9170365b1a025f0fc01ed9 Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Wed, 5 Mar 2014 13:38:19 +0000 Subject: [PATCH 0556/4438] Inject all account details in tempest.conf The tempest configuration function did not inject all account details in tempest.conf. The only reason why it worked, was because tempest uses default config values which are valid for the current devstack setup. To remove this dependency, two patches are needed: - this one in devstack, to inject all values - https://review.openstack.org/#/c/77602/ in tempest, to change default values to None Partially fixes bug 1287191 Change-Id: I01507b142703a1ff66707464b9a743e9d0ca3e01 --- lib/tempest | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 16f8744d85..8455aae170 100644 --- a/lib/tempest +++ b/lib/tempest @@ -149,8 +149,12 @@ function configure_tempest { password=${ADMIN_PASSWORD:-secrete} - # See files/keystone_data.sh where alt_demo user - # and tenant are set up... 
+ # See files/keystone_data.sh and stack.sh where admin, demo and alt_demo + # user and tenant are set up... + ADMIN_USERNAME=${ADMIN_USERNAME:-admin} + ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin} + TEMPEST_USERNAME=${TEMPEST_USERNAME:-demo} + TEMPEST_TENANT_NAME=${TEMPEST_TENANT_NAME:-demo} ALT_USERNAME=${ALT_USERNAME:-alt_demo} ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo} @@ -254,11 +258,15 @@ function configure_tempest { # Identity iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/" iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v3/" + iniset $TEMPEST_CONFIG identity username $TEMPEST_USERNAME iniset $TEMPEST_CONFIG identity password "$password" + iniset $TEMPEST_CONFIG identity tenant_name $TEMPEST_TENANT_NAME iniset $TEMPEST_CONFIG identity alt_username $ALT_USERNAME iniset $TEMPEST_CONFIG identity alt_password "$password" iniset $TEMPEST_CONFIG identity alt_tenant_name $ALT_TENANT_NAME + iniset $TEMPEST_CONFIG identity admin_username $ADMIN_USERNAME iniset $TEMPEST_CONFIG identity admin_password "$password" + iniset $TEMPEST_CONFIG identity admin_tenant_name $ADMIN_TENANT_NAME # Image # for the gate we want to be able to override this variable so we aren't @@ -285,7 +293,9 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method # Compute admin + iniset $TEMPEST_CONFIG "compute-admin" username $USERNAME iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED + iniset $TEMPEST_CONFIG "compute-admin" tenant_name $TENANT_NAME # Network iniset $TEMPEST_CONFIG network api_version 2.0 From 99b622a936c0b6f5b6283f3bcdca3bd7d0628e29 Mon Sep 17 00:00:00 2001 From: Ryan Hsu Date: Wed, 5 Mar 2014 15:35:49 -0800 Subject: [PATCH 0557/4438] Refactor vmdk upload code A syntax error is hit when trying to upload a flat vmdk file that is accompanied by a descriptor file. 
The code block that handles this has some unneeded characters that cause the error. Also, an else-block has been removed so that we can remove an extra indent. Change-Id: Iaf5c914e09da6831eeeec141228b39554a1e2216 Closes-bug: #1288471 --- functions | 51 +++++++++++++++++++++++++-------------------------- 1 file changed, 25 insertions(+), 26 deletions(-) diff --git a/functions b/functions index ab8319b0ce..1d30922916 100644 --- a/functions +++ b/functions @@ -163,38 +163,37 @@ function upload_image { if [[ ${IMAGE_NAME: -5} != "-flat" ]]; then warn $LINENO "Expected filename suffix: '-flat'."` `" Filename provided: ${IMAGE_NAME}" - else - descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk" - path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` - flat_path="${image_url:0:$path_len}" - descriptor_url=$flat_path$descriptor_fname - warn $LINENO "$descriptor_data_pair_msg"` - `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" - if [[ $flat_path != file* ]]; then - if [[ ! -f $FILES/$descriptor_fname || \ - "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then - wget -c $descriptor_url -O $FILES/$descriptor_fname - if [[ $? -ne 0 ]]; then - warn $LINENO "Descriptor not found $descriptor_url" - descriptor_found=false - fi - fi - descriptor_url="$FILES/$descriptor_fname" - else - descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g") - if [[ ! -f $descriptor_url || \ - "$(stat -c "%s" $descriptor_url)" == "0" ]]; then + fi + + descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk" + path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` + flat_path="${image_url:0:$path_len}" + descriptor_url=$flat_path$descriptor_fname + warn $LINENO "$descriptor_data_pair_msg"` + `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" + if [[ $flat_path != file* ]]; then + if [[ ! -f $FILES/$descriptor_fname || \ + "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then + wget -c $descriptor_url -O $FILES/$descriptor_fname + if [[ $? 
-ne 0 ]]; then warn $LINENO "Descriptor not found $descriptor_url" descriptor_found=false fi fi - if $descriptor_found; then - vmdk_adapter_type="$(head -25 $descriptor_url |"` - `" { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })" - vmdk_adapter_type="${vmdk_adapter_type#*\"}" - vmdk_adapter_type="${vmdk_adapter_type%?}" + descriptor_url="$FILES/$descriptor_fname" + else + descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g") + if [[ ! -f $descriptor_url || \ + "$(stat -c "%s" $descriptor_url)" == "0" ]]; then + warn $LINENO "Descriptor not found $descriptor_url" + descriptor_found=false fi fi + if $descriptor_found; then + vmdk_adapter_type="$(head -25 $descriptor_url | { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })" + vmdk_adapter_type="${vmdk_adapter_type#*\"}" + vmdk_adapter_type="${vmdk_adapter_type%?}" + fi vmdk_disktype="preallocated" else vmdk_disktype="preallocated" From 581f0ee48510d8eead8a95888ad9b56d89009a76 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Thu, 20 Feb 2014 16:28:15 +0100 Subject: [PATCH 0558/4438] Add a few missing package for SUSE Additionally rearranged the package list to be alphabetically sorrted Change-Id: I52cea97da60437250d0b7cf86a71e4a05d765568 --- files/rpms-suse/baremetal | 1 + files/rpms-suse/general | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) create mode 100644 files/rpms-suse/baremetal diff --git a/files/rpms-suse/baremetal b/files/rpms-suse/baremetal new file mode 100644 index 0000000000..61f73eeae3 --- /dev/null +++ b/files/rpms-suse/baremetal @@ -0,0 +1 @@ +dnsmasq diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 704947ea53..6d994eaf7a 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -1,15 +1,20 @@ +bc bridge-utils ca-certificates-mozilla curl euca2ools +findutils-locate # useful when debugging git-core iputils +libopenssl-devel # to rebuild pyOpenSSL if needed +lsof # useful when debugging +make openssh 
openssl psmisc -python-setuptools # instead of python-distribute; dist:sle11sp2 python-cmd2 # dist:opensuse-12.3 python-pylint +python-setuptools # instead of python-distribute; dist:sle11sp2 python-unittest2 screen tar @@ -17,7 +22,3 @@ tcpdump unzip vim-enhanced wget -bc - -findutils-locate # useful when debugging -lsof # useful when debugging From 4d8af4aa05a76219b634d02485ae637a404b399f Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Thu, 6 Mar 2014 15:07:53 +0400 Subject: [PATCH 0559/4438] Add n-obj to stop_nova Add missing nova-object service to nova services list Change-Id: Ib26204b69356ad030ba3d03f095993370fbb2676 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 583a5923ce..2d8715ba48 100644 --- a/lib/nova +++ b/lib/nova @@ -731,7 +731,7 @@ function stop_nova { # Kill the nova screen windows # Some services are listed here twice since more than one instance # of a service may be running in certain configs. - for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta; do + for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do screen_stop $serv done if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then From 423d7901a4cd6bc95188e023625b4e21251fad28 Mon Sep 17 00:00:00 2001 From: Nadya Privalova Date: Thu, 6 Mar 2014 15:14:59 +0400 Subject: [PATCH 0560/4438] Add an ability to configure debug-level for ceilometer Change-Id: Ibe9dd2391202a5af291d2eed1559bae60370f9a8 --- lib/ceilometer | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ceilometer b/lib/ceilometer index 04c1a34b8b..b0899e2f24 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -129,6 +129,7 @@ function configure_ceilometer { iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications' iniset $CEILOMETER_CONF DEFAULT verbose True + iniset $CEILOMETER_CONF DEFAULT debug 
"$ENABLE_DEBUG_LOG_LEVEL" # Install the policy file for the API server cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR From d44517dfcfacb5aa9e1952847a1505fd3a92580b Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Tue, 28 Jan 2014 20:29:18 +0000 Subject: [PATCH 0561/4438] Add support for configuring OVS to work with OpenDaylight This adds support for running OpenDaylight as an OpenStack Neutron plugin under devstack. This entails downloading the latest version of OpenDaylight, configuring it, and running it as a service under devstack. This code also includes pieces which configure Open vSwitch on each devstack node to point at OpenDaylight as their OpenFlow and OVSDB control interface. This is required for compute hosts, which will not be running any Neutron software on them at all. This post-devstack configuration is handled in the extras directory because of the fact there is no Neutron code running on the compute hosts themselves. Closes-bug: #1273917 Change-Id: I696e7c7fe63c835f90c56105775def305a702877 --- extras.d/80-opendaylight.sh | 67 ++++++++++++++ files/apts/opendaylight | 2 + files/rpms-suse/opendaylight | 4 + files/rpms/opendaylight | 1 + lib/opendaylight | 167 +++++++++++++++++++++++++++++++++++ 5 files changed, 241 insertions(+) create mode 100644 extras.d/80-opendaylight.sh create mode 100644 files/apts/opendaylight create mode 100644 files/rpms-suse/opendaylight create mode 100644 files/rpms/opendaylight create mode 100644 lib/opendaylight diff --git a/extras.d/80-opendaylight.sh b/extras.d/80-opendaylight.sh new file mode 100644 index 0000000000..cc5c8dec1a --- /dev/null +++ b/extras.d/80-opendaylight.sh @@ -0,0 +1,67 @@ +# opendaylight.sh - DevStack extras script + +# Need this first to get the is_***_enabled for ODL +source $TOP_DIR/lib/opendaylight + +if is_service_enabled odl-server; then + if [[ "$1" == "source" ]]; then + # no-op + : + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + install_opendaylight + 
configure_opendaylight + init_opendaylight + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + # This has to start before Neutron + start_opendaylight + elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then + # no-op + : + fi + + if [[ "$1" == "unstack" ]]; then + stop_opendaylight + cleanup_opendaylight + fi + + if [[ "$1" == "clean" ]]; then + # no-op + : + fi +fi + +if is_service_enabled odl-compute; then + if [[ "$1" == "source" ]]; then + # no-op + : + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + install_opendaylight-compute + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + create_nova_conf_neutron + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + echo_summary "Initializing OpenDaylight" + ODL_LOCAL_IP=${ODL_LOCAL_IP:-$HOST_IP} + ODL_MGR_PORT=${ODL_MGR_PORT:-6640} + read ovstbl <<< $(sudo ovs-vsctl get Open_vSwitch . _uuid) + sudo ovs-vsctl set-manager tcp:$ODL_MGR_IP:$ODL_MGR_PORT + sudo ovs-vsctl set Open_vSwitch $ovstbl other_config={"local_ip"="$ODL_LOCAL_IP"} + elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then + # no-op + : + fi + + if [[ "$1" == "unstack" ]]; then + sudo ovs-vsctl del-manager + BRIDGES=$(sudo ovs-vsctl list-br) + for bridge in $BRIDGES ; do + sudo ovs-vsctl del-controller $bridge + done + + stop_opendaylight-compute + fi + + if [[ "$1" == "clean" ]]; then + # no-op + : + fi +fi diff --git a/files/apts/opendaylight b/files/apts/opendaylight new file mode 100644 index 0000000000..ec3cc9daf8 --- /dev/null +++ b/files/apts/opendaylight @@ -0,0 +1,2 @@ +openvswitch-datapath-dkms # NOPRIME +openvswitch-switch # NOPRIME diff --git a/files/rpms-suse/opendaylight b/files/rpms-suse/opendaylight new file mode 100644 index 0000000000..d6c7146331 --- /dev/null +++ b/files/rpms-suse/opendaylight @@ -0,0 +1,4 @@ +openvswitch # NOPRIME +openvswitch-controller # NOPRIME +openvswitch-switch # NOPRIME + diff --git a/files/rpms/opendaylight b/files/rpms/opendaylight new file mode 100644 index 
0000000000..98aaaf48f7 --- /dev/null +++ b/files/rpms/opendaylight @@ -0,0 +1 @@ +openvswitch # NOPRIME diff --git a/lib/opendaylight b/lib/opendaylight new file mode 100644 index 0000000000..ca81c20e55 --- /dev/null +++ b/lib/opendaylight @@ -0,0 +1,167 @@ +# lib/opendaylight +# Functions to control the configuration and operation of the opendaylight service + +# Dependencies: +# +# - ``functions`` file +# # ``DEST`` must be defined +# # ``STACK_USER`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# - is_opendaylight_enabled +# - is_opendaylight-compute_enabled +# - install_opendaylight +# - install_opendaylight-compute +# - configure_opendaylight +# - init_opendaylight +# - start_opendaylight +# - stop_opendaylight-compute +# - stop_opendaylight +# - cleanup_opendaylight + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# For OVS_BRIDGE and PUBLIC_BRIDGE +source $TOP_DIR/lib/neutron_plugins/ovs_base + +# Defaults +# -------- + +# The IP address of ODL. Set this in local.conf. +# ODL_MGR_IP= +ODL_MGR_IP=${ODL_MGR_IP:-$SERVICE_HOST} + +# +ODL_DIR=$DEST/opendaylight + +# The OpenDaylight Package, currently using 'Hydrogen' release +ODL_PKG=${ODL_PKG:-distributions-virtualization-0.1.1-osgipackage.zip} + +# The OpenDaylight URL +ODL_URL=${ODL_URL:-https://nexus.opendaylight.org/content/repositories/opendaylight.release/org/opendaylight/integration/distributions-virtualization/0.1.1} + +# Default arguments for OpenDaylight. This is typically used to set +# Java memory options. 
+# ODL_ARGS=Xmx1024m -XX:MaxPermSize=512m +ODL_ARGS=${ODL_ARGS:-"-XX:MaxPermSize=384m"} + +# How long to pause after ODL starts to let it complete booting +ODL_BOOT_WAIT=${ODL_BOOT_WAIT:-60} + +# Set up default directories + + +# Entry Points +# ------------ + +# Test if OpenDaylight is enabled +# is_opendaylight_enabled +function is_opendaylight_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"odl-" ]] && return 0 + return 1 +} + +# cleanup_opendaylight() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_opendaylight { + : +} + +# configure_opendaylight() - Set config files, create data dirs, etc +function configure_opendaylight { + # Remove simple forwarder + rm -f $ODL_DIR/opendaylight/plugins/org.opendaylight.controller.samples.simpleforwarding* + + # Configure OpenFlow 1.3 + echo "ovsdb.of.version=1.3" >> $ODL_DIR/opendaylight/configuration/config.ini +} + +# init_opendaylight() - Initialize databases, etc. +function init_opendaylight { + # clean up from previous (possibly aborted) runs + # create required data files + : +} + +# install_opendaylight() - Collect source and prepare +function install_opendaylight { + local _pwd=$(pwd) + + if is_ubuntu; then + install_package maven openjdk-7-jre openjdk-7-jdk + else + yum_install maven java-1.7.0-openjdk + fi + + # Download OpenDaylight + mkdir -p $ODL_DIR + cd $ODL_DIR + wget -N $ODL_URL/$ODL_PKG + unzip -u $ODL_PKG +} + +# install_opendaylight-compute - Make sure OVS is install +function install_opendaylight-compute { + local kernel_version + # Install deps + # FIXME add to ``files/apts/neutron``, but don't install if not needed! 
+ if is_ubuntu; then + kernel_version=`cat /proc/version | cut -d " " -f3` + install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version + elif is_fedora; then + install_package openvswitch + # Ensure that the service is started + restart_service openvswitch + elif is_suse; then + install_package openvswitch + restart_service openvswitch-switch + restart_service openvswitch-controller + fi +} + +# start_opendaylight() - Start running processes, including screen +function start_opendaylight { + if is_ubuntu; then + JHOME=/usr/lib/jvm/java-1.7.0-openjdk-amd64 + else + JHOME=/usr/lib/jvm/java-1.7.0-openjdk + fi + + # The flags to ODL have the following meaning: + # -of13: runs ODL using OpenFlow 1.3 protocol support. + # -virt ovsdb: Runs ODL in "virtualization" mode with OVSDB support + screen_it odl-server "cd $ODL_DIR/opendaylight && JAVE_HOME=$JHOME ./run.sh $ODL_ARGS -of13 -virt ovsdb" + + # Sleep a bit to let OpenDaylight finish starting up + sleep $ODL_BOOT_WAIT +} + +# stop_opendaylight() - Stop running processes (non-screen) +function stop_opendaylight { + screen_stop odl-server +} + +# stop_opendaylight-compute() - Remove OVS bridges +function stop_opendaylight-compute { + # remove all OVS ports that look like Neutron created ports + for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do + sudo ovs-vsctl del-port ${port} + done + + # remove all OVS bridges created by Neutron + for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE}); do + sudo ovs-vsctl del-br ${bridge} + done +} + +# Restore xtrace +$XTRACE + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: From a99b869d3c14b33d0cf59877f3ae60686763f8ae Mon Sep 17 00:00:00 2001 From: Sergey Skripnick Date: Wed, 5 Mar 2014 14:47:58 +0200 Subject: [PATCH 0562/4438] Do not restart libvirt if n-cpu is disabled If this service is disable in localrc, 
libvirt is not installed at all, and should not be restarted. Change-Id: Iaf482d4a82a26546c25249b3e32c7e629d862a1b Closes: bug 1288236 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 583a5923ce..2f6d04db78 100644 --- a/lib/nova +++ b/lib/nova @@ -308,7 +308,7 @@ function configure_nova { # Rebuild the config file from scratch create_nova_conf - if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then # Configure hypervisor plugin configure_nova_hypervisor fi From b44a8ef14f4e177aef0528db2b7721030f76b290 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 6 Mar 2014 11:25:04 -0600 Subject: [PATCH 0563/4438] Fix errexit in lib/ldap clear_ldap_state() deletes an object from the DIT that doesn't exist on the first run, this is OK but fails with errexit enabled. Change-Id: I3b881eedc891caa6b2dfd5913e43f3babcfa7d47 --- lib/ldap | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ldap b/lib/ldap index 51d02519af..efe2f096d7 100644 --- a/lib/ldap +++ b/lib/ldap @@ -154,7 +154,7 @@ function stop_ldap { # clear_ldap_state() - Clear LDAP State function clear_ldap_state { - ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN" + ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN" || : } # Restore xtrace From 1eae3e155a25faa8e0bb6ddba77e580c774fd265 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 6 Mar 2014 11:49:22 -0600 Subject: [PATCH 0564/4438] Make stop_swift() more robust for Grenade stop_swift() wasn't calling screen_stop() so the pid files and screen sessions were not being cleaned up. DevStack doesn't really care but Grenade does for the 'base' copy of DevStack. This should be backported to stable/havana for this reason. 
Change-Id: Ib5afb321cef2b7ad74e69a3fd0d1dad469f78b11 --- lib/swift | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/swift b/lib/swift index 5d4d4ef506..b8bc1b66e7 100644 --- a/lib/swift +++ b/lib/swift @@ -687,6 +687,11 @@ function stop_swift { swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true fi # Dump all of the servers + # Maintain the iteration as screen_stop() has some desirable side-effects + for type in proxy object container account; do + screen_stop s-${type} + done + # Blast out any stragglers pkill -f swift- } From f5d2a5ceb4030aa0868b11ef84b5055b70693702 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 6 Mar 2014 13:45:42 -0500 Subject: [PATCH 0565/4438] test for adding crazy branches as found by dansmith's clever hack, if devstack lands a crazy branch name in stackrc, we'd break the devstack gate. While it's doubtful anyone would do this, add a basic sanity check. Change-Id: Ib3b1881ed4fd520a1828ed073a7c8353e6f0a839 --- run_tests.sh | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/run_tests.sh b/run_tests.sh index a0bfbee0c0..685b2037f0 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -27,3 +27,16 @@ fi echo "Running bash8..." ./tools/bash8.py -v $FILES + + +# Test that no one is trying to land crazy refs as branches + +echo "Ensuring we don't have crazy refs" + +REFS=`grep BRANCH stackrc | grep -v -- '-master'` +rc=$? +if [[ $rc -eq 0 ]]; then + echo "Branch defaults must be master. Found:" + echo $REFS + exit 1 +fi From 07f1d0ef3d638d2289a45a17546e976907e004ee Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Thu, 6 Mar 2014 23:23:01 +0000 Subject: [PATCH 0566/4438] Iniset keystone auth version Introduces support for suth_version config flag in lib/tempest. The variable is named TEMPEST_AUTH_VERSION, and it can be set via localrc, so that the devstack-vm-gate-wrap may control it. The aim is to setup a keystone v3 based experimental check job in tempest experimental pipeline. 
Partially implements bp multi-keystone-api-version-tests Change-Id: Ia6832d87308c6c7109e6ae0dbd8dff61134718ee --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 8455aae170..b90988d1d9 100644 --- a/lib/tempest +++ b/lib/tempest @@ -267,6 +267,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG identity admin_username $ADMIN_USERNAME iniset $TEMPEST_CONFIG identity admin_password "$password" iniset $TEMPEST_CONFIG identity admin_tenant_name $ADMIN_TENANT_NAME + iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v2} # Image # for the gate we want to be able to override this variable so we aren't From e530ba30a6965c016934819be5b1cfcaa6879b75 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 7 Mar 2014 05:58:18 -0500 Subject: [PATCH 0567/4438] make compute-admin correct we lost the admin tenant at some point in the last couple of days which disabled 500 tempest tests. Bring this back. Change-Id: I5cab2074777cab99982ae8fc4a83663e9d128284 --- lib/tempest | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index 8455aae170..b3736da963 100644 --- a/lib/tempest +++ b/lib/tempest @@ -293,9 +293,9 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method # Compute admin - iniset $TEMPEST_CONFIG "compute-admin" username $USERNAME - iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED - iniset $TEMPEST_CONFIG "compute-admin" tenant_name $TENANT_NAME + iniset $TEMPEST_CONFIG "compute-admin" username $ADMIN_USERNAME + iniset $TEMPEST_CONFIG "compute-admin" password "$password" + iniset $TEMPEST_CONFIG "compute-admin" tenant_name $ADMIN_TENANT_NAME # Network iniset $TEMPEST_CONFIG network api_version 2.0 From bb1e07859cce688e3beed2c573e9073a72f778fb Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 6 Mar 2014 09:40:27 -0800 Subject: [PATCH 0568/4438] Don't install vim or locate by default Devstack 
doesn't need vim or locate, if someone wants to use them, they can just install them afterwards. Change-Id: I00f27c20c86d89465e4aefc67ed645a309c09a03 --- files/apts/general | 2 -- files/rpms-suse/general | 2 -- tools/xen/prepare_guest.sh | 2 +- 3 files changed, 1 insertion(+), 5 deletions(-) diff --git a/files/apts/general b/files/apts/general index 32d31f0642..995c0c6f88 100644 --- a/files/apts/general +++ b/files/apts/general @@ -9,8 +9,6 @@ git lsof # useful when debugging openssh-server openssl -vim-nox -locate # useful when debugging python-virtualenv python-unittest2 iputils-ping diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 6d994eaf7a..ff27a3aac7 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -3,7 +3,6 @@ bridge-utils ca-certificates-mozilla curl euca2ools -findutils-locate # useful when debugging git-core iputils libopenssl-devel # to rebuild pyOpenSSL if needed @@ -20,5 +19,4 @@ screen tar tcpdump unzip -vim-enhanced wget diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index 440774ec5b..2b5e418a6a 100755 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -73,7 +73,7 @@ EOF # Install basics apt-get update apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool -apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo python-netaddr +apt-get install -y curl wget ssh openssh-server python-pip git sudo python-netaddr pip install xenapi # Install XenServer guest utilities From b27f16d71660f75fcd82a035cdaf2b2eddec99ce Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 28 Feb 2014 14:29:02 +1100 Subject: [PATCH 0569/4438] Detect missing packages with yum yum -y doesn't report an error when packages are missing (see [1] for upstream discussion). Thus we run the output of yum through a small awk script looking for missing packages output. 
The one change required for RHEL is that python-wsgiref is included in the distro python, so doesn't need a separate package. [1] https://bugzilla.redhat.com/show_bug.cgi?id=965567 Change-Id: I9908ff4edbf2b0d961d25837a08a34e1417bbb02 --- files/rpms/glance | 2 +- functions-common | 17 ++++++++++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/files/rpms/glance b/files/rpms/glance index 25c5d3902b..534097a92f 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -13,6 +13,6 @@ python-lxml #dist:f18,f19,f20,rhel7 python-paste-deploy #dist:f18,f19,f20,rhel7 python-routes python-sqlalchemy -python-wsgiref +python-wsgiref #dist:f18,f19,f20 pyxattr zlib-devel # testonly diff --git a/functions-common b/functions-common index 0db3ff3e7c..ed3d8832fd 100644 --- a/functions-common +++ b/functions-common @@ -938,9 +938,24 @@ function yum_install { [[ "$OFFLINE" = "True" ]] && return local sudo="sudo" [[ "$(id -u)" = "0" ]] && sudo="env" + + # The manual check for missing packages is because yum -y assumes + # missing packages are OK. See + # https://bugzilla.redhat.com/show_bug.cgi?id=965567 $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ no_proxy=$no_proxy \ - yum install -y "$@" + yum install -y "$@" 2>&1 | \ + awk ' + BEGIN { fail=0 } + /No package/ { fail=1 } + { print } + END { exit fail }' || \ + die $LINENO "Missing packages detected" + + # also ensure we catch a yum failure + if [[ ${PIPESTATUS[0]} != 0 ]]; then + die $LINENO "Yum install failure" + fi } # zypper wrapper to set arguments correctly From f19ccb63593e4c3e6c1c2a7d4f2552c30ca1ee62 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Sat, 8 Mar 2014 07:54:05 -0800 Subject: [PATCH 0570/4438] Take tempurl out of Swift pipeline additions Swift commit 165dd44 added tempurl to the sample config, so now it appears twice in the default devstack-installed configuration. 
This commit removes tempurl from $SWIFT_EXTRAS_MIDDLEWARE so that it only appears once in the generated proxy pipeline. Change-Id: I4204b2a444312ab87c17f5fb296a43818a4528a6 --- lib/swift | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index b8bc1b66e7..b65544046a 100644 --- a/lib/swift +++ b/lib/swift @@ -67,8 +67,8 @@ fi SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_DEFAULT} # Set ``SWIFT_EXTRAS_MIDDLEWARE`` to extras middlewares. -# Default is ``staticweb, tempurl, formpost`` -SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb} +# Default is ``staticweb, formpost`` +SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-formpost staticweb} # Set ``SWIFT_EXTRAS_MIDDLEWARE_LAST`` to extras middlewares that need to be at # the end of the pipeline. From 11b36c9b0a0a04ff3a53ae95c6de94fdd457f5e7 Mon Sep 17 00:00:00 2001 From: Roey Chen Date: Mon, 10 Mar 2014 11:25:50 +0200 Subject: [PATCH 0571/4438] Fixed unconditioned source phase in OpenDaylight extras Should source ``lib/opendaylight`` in ``extras.d/80-opendaylight.sh`` only when appropriate services are enabled. 
Fix for bug/1290033 Change-Id: Ifa470e1e132029f3c5bf255f27c4e96373b339a8 Signed-off-by: Roey Chen --- extras.d/80-opendaylight.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/extras.d/80-opendaylight.sh b/extras.d/80-opendaylight.sh index cc5c8dec1a..57b43288e0 100644 --- a/extras.d/80-opendaylight.sh +++ b/extras.d/80-opendaylight.sh @@ -1,7 +1,9 @@ # opendaylight.sh - DevStack extras script -# Need this first to get the is_***_enabled for ODL -source $TOP_DIR/lib/opendaylight +if is_service_enabled odl-server odl-compute; then + # Initial source + [[ "$1" == "source" ]] && source $TOP_DIR/lib/opendaylight +fi if is_service_enabled odl-server; then if [[ "$1" == "source" ]]; then From d9259ea466e54349fa87e7f76b7dfd061b19423c Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 10 Mar 2014 08:39:15 -0400 Subject: [PATCH 0572/4438] remove distros that are out of support by their upstream raring EOL was - 27 Jan 2014 f18 EOL was - 14 Jan 2014 opensuse 12.2 was - 15 Jan 2014 if their upstream isn't going to support them, we shouldn't be in devstack. this additionally leaves us in an interesting situation that there is no longer *any* opensuse version listed as supported. if the opensuse community doesn't step up here we should probably look at removing it. Change-Id: Ibb883930b430477dfd3b5126c5db04f95a50d3a7 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 148ce04e28..e76a55c534 100755 --- a/stack.sh +++ b/stack.sh @@ -142,7 +142,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (precise|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f18|f19|f20|opensuse-12.2|rhel6) ]]; then +if [[ ! 
${DISTRO} =~ (precise|saucy|trusty|7.0|wheezy|sid|testing|jessie|f19|f20|rhel6) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 353c4f1240d974e9ce93ba1f00a4bc7fe2c5856e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 10 Mar 2014 08:44:18 -0400 Subject: [PATCH 0573/4438] remove additional f18 references f18 has been EOL for 6 weeks now, time to purge it from devstack Change-Id: I5aac2c63b2f4cd8b01ae685b1acf4c188637558b --- files/rpms/cinder | 2 +- files/rpms/glance | 4 ++-- files/rpms/horizon | 4 ++-- files/rpms/keystone | 8 ++++---- files/rpms/neutron | 4 ++-- files/rpms/nova | 6 +++--- files/rpms/swift | 2 +- 7 files changed, 15 insertions(+), 15 deletions(-) diff --git a/files/rpms/cinder b/files/rpms/cinder index 199ae10b79..423d57cd98 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -4,4 +4,4 @@ qemu-img python-devel postgresql-devel iscsi-initiator-utils -python-lxml #dist:f18,f19,f20,rhel7 +python-lxml #dist:f19,f20,rhel7 diff --git a/files/rpms/glance b/files/rpms/glance index 25c5d3902b..c886ecee10 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -9,8 +9,8 @@ python-argparse python-devel python-eventlet python-greenlet -python-lxml #dist:f18,f19,f20,rhel7 -python-paste-deploy #dist:f18,f19,f20,rhel7 +python-lxml #dist:f19,f20,rhel7 +python-paste-deploy #dist:f19,f20,rhel7 python-routes python-sqlalchemy python-wsgiref diff --git a/files/rpms/horizon b/files/rpms/horizon index 59503cc9aa..2dd24e0763 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -16,8 +16,8 @@ python-kombu python-migrate python-mox python-nose -python-paste #dist:f18,f19,f20 -python-paste-deploy #dist:f18,f19,f20 +python-paste #dist:f19,f20 +python-paste-deploy #dist:f19,f20 python-routes python-sphinx python-sqlalchemy diff --git a/files/rpms/keystone b/files/rpms/keystone index 99e8524628..7182091b31 100644 --- 
a/files/rpms/keystone +++ b/files/rpms/keystone @@ -1,9 +1,9 @@ python-greenlet libxslt-devel # dist:f20 -python-lxml #dist:f18,f19,f20 -python-paste #dist:f18,f19,f20 -python-paste-deploy #dist:f18,f19,f20 -python-paste-script #dist:f18,f19,f20 +python-lxml #dist:f19,f20 +python-paste #dist:f19,f20 +python-paste-deploy #dist:f19,f20 +python-paste-script #dist:f19,f20 python-routes python-sqlalchemy python-webob diff --git a/files/rpms/neutron b/files/rpms/neutron index 42d7f68d37..06ea0ea35d 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -11,8 +11,8 @@ python-greenlet python-iso8601 python-kombu #rhel6 gets via pip -python-paste # dist:f18,f19,f20,rhel7 -python-paste-deploy # dist:f18,f19,f20,rhel7 +python-paste # dist:f19,f20,rhel7 +python-paste-deploy # dist:f19,f20,rhel7 python-qpid python-routes python-sqlalchemy diff --git a/files/rpms/nova b/files/rpms/nova index a607d925e1..45d6e0bfb1 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -28,11 +28,11 @@ python-kombu python-lockfile python-migrate python-mox -python-paramiko # dist:f18,f19,f20,rhel7 +python-paramiko # dist:f19,f20,rhel7 # ^ on RHEL6, brings in python-crypto which conflicts with version from # pip we need -python-paste # dist:f18,f19,f20,rhel7 -python-paste-deploy # dist:f18,f19,f20,rhel7 +python-paste # dist:f19,f20,rhel7 +python-paste-deploy # dist:f19,f20,rhel7 python-qpid python-routes python-sqlalchemy diff --git a/files/rpms/swift b/files/rpms/swift index 72253f7752..bf29ea29b7 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -9,7 +9,7 @@ python-eventlet python-greenlet python-netifaces python-nose -python-paste-deploy # dist:f18,f19,f20,rhel7 +python-paste-deploy # dist:f19,f20,rhel7 python-simplejson python-webob pyxattr From 13349080b11383697f7c5312c357cc6c336ff9ba Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 10 Mar 2014 11:27:23 -0400 Subject: [PATCH 0574/4438] put libvirt debug in the right place libvirt debug setting was happening in a place where we 
weren't actually resetting the daemon. Move it to into the hypervisor plugin where we do. Change-Id: Ia79b0ef50f6b8fb007a20ce5cb4e510a5e4600a5 --- lib/nova | 11 ----------- lib/nova_plugins/hypervisor-libvirt | 10 ++++++++++ 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/lib/nova b/lib/nova index 583a5923ce..f5e0d11281 100644 --- a/lib/nova +++ b/lib/nova @@ -665,17 +665,6 @@ function start_nova_compute { fi if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - # Enable client side traces for libvirt - local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" - local log_outputs="1:file:/var/log/libvirt/libvirtd.log" - # Enable server side traces for libvirtd - if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then - echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf - fi - if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then - echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf - fi - # The group **$LIBVIRT_GROUP** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'" diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index bbf65546f7..26880e5850 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -103,6 +103,16 @@ EOF fi add_user_to_group $STACK_USER $LIBVIRT_GROUP + # Enable server side traces for libvirtd + local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" + local log_outputs="1:file:/var/log/libvirt/libvirtd.log" + if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then + echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + if ! 
grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then + echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + # libvirt detects various settings on startup, as we potentially changed # the system configuration (modules, filesystems), we need to restart # libvirt to detect those changes. From 2983474e37d6c97c482e154a1f0d1f60a709915b Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 9 Mar 2014 18:36:42 +0100 Subject: [PATCH 0575/4438] Use the $SERVICE_HOST in backup_swift_url The $SERVICE_HOST is used to specify the swift proxy endpoint, the c-bak should use the same endpoint. Change-Id: Ia815f514839b0d1ec3fb9bb40992637c4f123e06 --- lib/cinder | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/cinder b/lib/cinder index dd2956a5b4..dadbe40a3b 100644 --- a/lib/cinder +++ b/lib/cinder @@ -274,6 +274,10 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT lock_path $CINDER_STATE_PATH iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL + if is_service_enabled swift; then + iniset $CINDER_CONF DEFAULT backup_swift_url "http://$SERVICE_HOST:8080/v1/AUTH_" + fi + if is_service_enabled ceilometer; then iniset $CINDER_CONF DEFAULT notification_driver "cinder.openstack.common.notifier.rpc_notifier" fi From cea32b1f86631761e170413124dbf80972234a8c Mon Sep 17 00:00:00 2001 From: Aaron Rosen Date: Tue, 4 Mar 2014 16:20:14 -0800 Subject: [PATCH 0576/4438] Configuration needed for neutron nova callback Change-Id: I07cb476f5e87e967cd6fbbfc82881e8a147453b4 --- lib/neutron | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/lib/neutron b/lib/neutron index bb591abb0b..84e827761a 100644 --- a/lib/neutron +++ b/lib/neutron @@ -110,6 +110,10 @@ Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} # nova vif driver that all plugins should use NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} 
+Q_NOTIFY_NOVA_PORT_STATUS_CHANGE=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGE:-True} +Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_CHANGE:-True} +VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} +VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} # The next two variables are configured by plugin # e.g. _configure_neutron_l3_agent or lib/neutron_plugins/* @@ -313,6 +317,9 @@ function create_nova_conf_neutron { if is_service_enabled q-meta; then iniset $NOVA_CONF DEFAULT service_neutron_metadata_proxy "True" fi + + iniset $NOVA_CONF DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" + iniset $NOVA_CONF DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" } # create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process @@ -754,6 +761,16 @@ function _configure_neutron_service { iniset $NEUTRON_CONF DEFAULT ${I/=/ } done + # Configuration for neutron notifations to nova. + iniset $NEUTRON_CONF DEFAULT notify_nova_port_status_change $Q_NOTIFY_NOVA_PORT_STATUS_CHANGE + iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES + iniset $NEUTRON_CONF DEFAULT nova_url "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2" + iniset $NEUTRON_CONF DEFAULT nova_admin_username nova $NOVA_USER + iniset $NEUTRON_CONF DEFAULT nova_admin_password $SERVICE_PASSWORD + ADMIN_TENANT_ID=$(openstack project list | awk "/ service / { print \$2 }") + iniset $NEUTRON_CONF DEFAULT nova_admin_tenant_id $ADMIN_TENANT_ID + iniset $NEUTRON_CONF DEFAULT nova_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" + # Configure plugin neutron_plugin_configure_service } From 42a59c2bfae69eca5520748d6b45803a387fdb88 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 3 Mar 2014 14:31:29 -0600 Subject: [PATCH 0577/4438] Complete moving Keystone setup out of keystone_data.sh * Move remaining role creation to create_keystone_accounts() * Move glance creation to create_glance_accounts() * 
Move nova/ec2/s3 creation to create_nova_accounts() * Move ceilometer creation to create_ceilometer_accounts() * Move tempest creation to create_tempest_accounts() * Convert moved code to use OpenStackClient for setup * files/keystone_data.sh is removed Note that the SERVICE_TENANT and ADMIN_ROLE lookups in the other service implementations are not necessary with OSC, all operations can be done using names rather than requiring IDs. Change-Id: I4283ca0036ae39fd44ed2eed834b69d78e4f8257 --- extras.d/80-tempest.sh | 2 +- files/keystone_data.sh | 146 ----------------------------------------- lib/ceilometer | 12 ++++ lib/glance | 43 ++++++++++++ lib/keystone | 19 ++++-- lib/nova | 47 ++++++++++++- lib/tempest | 24 +++++++ stack.sh | 21 ++---- 8 files changed, 146 insertions(+), 168 deletions(-) delete mode 100755 files/keystone_data.sh diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh index 0186e36aee..74f4c60d10 100644 --- a/extras.d/80-tempest.sh +++ b/extras.d/80-tempest.sh @@ -9,7 +9,7 @@ if is_service_enabled tempest; then install_tempest elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then # Tempest config must come after layer 2 services are running - : + create_tempest_accounts elif [[ "$1" == "stack" && "$2" == "extra" ]]; then echo_summary "Initializing Tempest" configure_tempest diff --git a/files/keystone_data.sh b/files/keystone_data.sh deleted file mode 100755 index fc1e8136a4..0000000000 --- a/files/keystone_data.sh +++ /dev/null @@ -1,146 +0,0 @@ -#!/bin/bash -# -# Initial data for Keystone using python-keystoneclient -# -# Tenant User Roles -# ------------------------------------------------------------------ -# service glance service -# service glance-swift ResellerAdmin -# service heat service # if enabled -# service ceilometer admin # if enabled -# Tempest Only: -# alt_demo alt_demo Member -# -# Variables set before calling this script: -# SERVICE_TOKEN - aka admin_token in keystone.conf -# SERVICE_ENDPOINT - local Keystone admin 
endpoint -# SERVICE_TENANT_NAME - name of tenant containing service accounts -# SERVICE_HOST - host used for endpoint creation -# ENABLED_SERVICES - stack.sh's list of services to start -# DEVSTACK_DIR - Top-level DevStack directory -# KEYSTONE_CATALOG_BACKEND - used to determine service catalog creation - -# Defaults -# -------- - -ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete} -SERVICE_PASSWORD=${SERVICE_PASSWORD:-$ADMIN_PASSWORD} -export SERVICE_TOKEN=$SERVICE_TOKEN -export SERVICE_ENDPOINT=$SERVICE_ENDPOINT -SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} - -# Roles -# ----- - -# The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it. -# The admin role in swift allows a user to act as an admin for their tenant, -# but ResellerAdmin is needed for a user to act as any tenant. The name of this -# role is also configurable in swift-proxy.conf -keystone role-create --name=ResellerAdmin -# Service role, so service users do not have to be admins -keystone role-create --name=service - - -# Services -# -------- - -if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then - # Nova needs ResellerAdmin role to download images when accessing - # swift through the s3 api. 
- keystone user-role-add \ - --tenant $SERVICE_TENANT_NAME \ - --user nova \ - --role ResellerAdmin -fi - -# Glance -if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then - keystone user-create \ - --name=glance \ - --pass="$SERVICE_PASSWORD" \ - --tenant $SERVICE_TENANT_NAME \ - --email=glance@example.com - keystone user-role-add \ - --tenant $SERVICE_TENANT_NAME \ - --user glance \ - --role service - # required for swift access - if [[ "$ENABLED_SERVICES" =~ "s-proxy" ]]; then - keystone user-create \ - --name=glance-swift \ - --pass="$SERVICE_PASSWORD" \ - --tenant $SERVICE_TENANT_NAME \ - --email=glance-swift@example.com - keystone user-role-add \ - --tenant $SERVICE_TENANT_NAME \ - --user glance-swift \ - --role ResellerAdmin - fi - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - keystone service-create \ - --name=glance \ - --type=image \ - --description="Glance Image Service" - keystone endpoint-create \ - --region RegionOne \ - --service glance \ - --publicurl "http://$SERVICE_HOST:9292" \ - --adminurl "http://$SERVICE_HOST:9292" \ - --internalurl "http://$SERVICE_HOST:9292" - fi -fi - -# Ceilometer -if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then - # Ceilometer needs ResellerAdmin role to access swift account stats. 
- keystone user-role-add --tenant $SERVICE_TENANT_NAME \ - --user ceilometer \ - --role ResellerAdmin -fi - -# EC2 -if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - keystone service-create \ - --name=ec2 \ - --type=ec2 \ - --description="EC2 Compatibility Layer" - keystone endpoint-create \ - --region RegionOne \ - --service ec2 \ - --publicurl "http://$SERVICE_HOST:8773/services/Cloud" \ - --adminurl "http://$SERVICE_HOST:8773/services/Admin" \ - --internalurl "http://$SERVICE_HOST:8773/services/Cloud" - fi -fi - -# S3 -if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift3" ]]; then - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - keystone service-create \ - --name=s3 \ - --type=s3 \ - --description="S3" - keystone endpoint-create \ - --region RegionOne \ - --service s3 \ - --publicurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ - --adminurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ - --internalurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" - fi -fi - -if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then - # Tempest has some tests that validate various authorization checks - # between two regular users in separate tenants - keystone tenant-create \ - --name=alt_demo - keystone user-create \ - --name=alt_demo \ - --pass="$ADMIN_PASSWORD" \ - --email=alt_demo@example.com - keystone user-role-add \ - --tenant alt_demo \ - --user alt_demo \ - --role Member -fi diff --git a/lib/ceilometer b/lib/ceilometer index 04c1a34b8b..b8305b1e9e 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -69,6 +69,11 @@ function is_ceilometer_enabled { # create_ceilometer_accounts() - Set up common required ceilometer accounts +# Project User Roles +# ------------------------------------------------------------------ +# SERVICE_TENANT_NAME ceilometer admin +# SERVICE_TENANT_NAME ceilometer ResellerAdmin (if Swift is enabled) + create_ceilometer_accounts() { SERVICE_TENANT=$(openstack project list | awk "/ 
$SERVICE_TENANT_NAME / { print \$2 }") @@ -99,6 +104,13 @@ create_ceilometer_accounts() { --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" fi + if is_service_enabled swift; then + # Ceilometer needs ResellerAdmin role to access swift account stats. + openstack role add \ + --project $SERVICE_TENANT_NAME \ + --user ceilometer \ + ResellerAdmin + fi fi } diff --git a/lib/glance b/lib/glance index 8a4c21b3f2..51e4399388 100644 --- a/lib/glance +++ b/lib/glance @@ -159,6 +159,49 @@ function configure_glance { cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON } +# create_glance_accounts() - Set up common required glance accounts + +# Project User Roles +# ------------------------------------------------------------------ +# SERVICE_TENANT_NAME glance service +# SERVICE_TENANT_NAME glance-swift ResellerAdmin (if Swift is enabled) + +function create_glance_accounts { + if is_service_enabled g-api; then + openstack user create \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT_NAME \ + glance + openstack role add \ + --project $SERVICE_TENANT_NAME \ + --user glance \ + service + # required for swift access + if is_service_enabled s-proxy; then + openstack user create \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT_NAME \ + glance-swift + openstack role add \ + --project $SERVICE_TENANT_NAME \ + --user glance-swift \ + ResellerAdmin + fi + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + openstack service create \ + --type image \ + --description "Glance Image Service" \ + glance + openstack endpoint create \ + --region RegionOne \ + --publicurl "http://$GLANCE_HOSTPORT" \ + --adminurl "http://$GLANCE_HOSTPORT" \ + --internalurl "http://$GLANCE_HOSTPORT" \ + glance + fi + fi +} + # create_glance_cache_dir() - Part of the init_glance() process function create_glance_cache_dir { # Create 
cache dir diff --git a/lib/keystone b/lib/keystone index c6856c95c3..b31cc57a56 100644 --- a/lib/keystone +++ b/lib/keystone @@ -266,9 +266,11 @@ function configure_keystone { # Tenant User Roles # ------------------------------------------------------------------ +# admin admin admin # service -- -- +# -- -- service +# -- -- ResellerAdmin # -- -- Member -# admin admin admin # demo admin admin # demo demo Member, anotherrole # invisible_to_admin demo Member @@ -294,10 +296,17 @@ function create_keystone_accounts { --project $ADMIN_TENANT \ --user $ADMIN_USER - # service - SERVICE_TENANT=$(openstack project create \ - $SERVICE_TENANT_NAME \ - | grep " id " | get_field 2) + # Create service project/role + openstack project create $SERVICE_TENANT_NAME + + # Service role, so service users do not have to be admins + openstack role create service + + # The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it. + # The admin role in swift allows a user to act as an admin for their tenant, + # but ResellerAdmin is needed for a user to act as any tenant. 
The name of this + # role is also configurable in swift-proxy.conf + openstack role create ResellerAdmin # The Member role is used by Horizon and Swift so we need to keep it: MEMBER_ROLE=$(openstack role create \ diff --git a/lib/nova b/lib/nova index 583a5923ce..a7c44211ca 100644 --- a/lib/nova +++ b/lib/nova @@ -316,9 +316,10 @@ function configure_nova { # create_nova_accounts() - Set up common required nova accounts -# Tenant User Roles +# Project User Roles # ------------------------------------------------------------------ -# service nova admin, [ResellerAdmin (swift only)] +# SERVICE_TENANT_NAME nova admin +# SERVICE_TENANT_NAME nova ResellerAdmin (if Swift is enabled) # Migrated from keystone_data.sh create_nova_accounts() { @@ -363,6 +364,48 @@ create_nova_accounts() { --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" fi fi + + if is_service_enabled n-api; then + # Swift + if is_service_enabled swift; then + # Nova needs ResellerAdmin role to download images when accessing + # swift through the s3 api. 
+ openstack role add \ + --project $SERVICE_TENANT_NAME \ + --user nova \ + ResellerAdmin + fi + + # EC2 + if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then + openstack service create \ + --type ec2 \ + --description "EC2 Compatibility Layer" \ + ec2 + openstack endpoint create \ + --region RegionOne \ + --publicurl "http://$SERVICE_HOST:8773/services/Cloud" \ + --adminurl "http://$SERVICE_HOST:8773/services/Admin" \ + --internalurl "http://$SERVICE_HOST:8773/services/Cloud" \ + ec2 + fi + fi + + # S3 + if is_service_enabled n-obj swift3; then + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + openstack service create \ + --type s3 \ + --description "S3" \ + s3 + openstack endpoint create \ + --region RegionOne \ + --publicurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ + --adminurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ + --internalurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ + s3 + fi + fi } # create_nova_conf() - Create a new nova.conf file diff --git a/lib/tempest b/lib/tempest index 16f8744d85..897efa8a8f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -358,6 +358,30 @@ function configure_tempest { $errexit } +# create_tempest_accounts() - Set up common required tempest accounts + +# Project User Roles +# ------------------------------------------------------------------ +# alt_demo alt_demo Member + +# Migrated from keystone_data.sh +function create_tempest_accounts { + if is_service_enabled tempest; then + # Tempest has some tests that validate various authorization checks + # between two regular users in separate tenants + openstack project create \ + alt_demo + openstack user create \ + --project alt_demo \ + --password "$ADMIN_PASSWORD" \ + alt_demo + openstack role add \ + --project alt_demo \ + --user alt_demo \ + Member + fi +} + # install_tempest() - Collect source and prepare function install_tempest { git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH diff --git a/stack.sh b/stack.sh index c990a1c6ca..f8973ee98f 100755 --- a/stack.sh +++ 
b/stack.sh @@ -907,14 +907,13 @@ if is_service_enabled key; then SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0 fi - # Do the keystone-specific bits from keystone_data.sh - export OS_SERVICE_TOKEN=$SERVICE_TOKEN - export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT - # Add temporarily to make openstackclient work + # Setup OpenStackclient token-flow auth export OS_TOKEN=$SERVICE_TOKEN export OS_URL=$SERVICE_ENDPOINT + create_keystone_accounts create_nova_accounts + create_glance_accounts create_cinder_accounts create_neutron_accounts @@ -922,7 +921,7 @@ if is_service_enabled key; then create_ceilometer_accounts fi - if is_service_enabled swift || is_service_enabled s-proxy; then + if is_service_enabled swift; then create_swift_accounts fi @@ -930,20 +929,14 @@ if is_service_enabled key; then create_heat_accounts fi - # ``keystone_data.sh`` creates services, admin and demo users, and roles. - ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ - SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ - S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \ - DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \ - bash -x $FILES/keystone_data.sh - - # Set up auth creds now that keystone is bootstrapped + # Begone token-flow auth unset OS_TOKEN OS_URL + + # Set up password-flow auth creds now that keystone is bootstrapped export OS_AUTH_URL=$SERVICE_ENDPOINT export OS_TENANT_NAME=admin export OS_USERNAME=admin export OS_PASSWORD=$ADMIN_PASSWORD - unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT fi From 936284b02ab6365bb0bcde49b617a57a902d491c Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 11 Mar 2014 09:35:55 +1100 Subject: [PATCH 0578/4438] Make mongo install for ceilometer NOPRIME mongodb packages are missing on some platforms, so we switch to a manual install. 
Also gate the mongo call in cleanup Change-Id: I1755e461c66be30da3db2a0994f908503c4c38ea --- files/apts/ceilometer-collector | 4 ++-- files/rpms/ceilometer-collector | 4 ++-- lib/ceilometer | 21 ++++++++++++++++++--- 3 files changed, 22 insertions(+), 7 deletions(-) diff --git a/files/apts/ceilometer-collector b/files/apts/ceilometer-collector index 71007ba4c5..f1b692ac71 100644 --- a/files/apts/ceilometer-collector +++ b/files/apts/ceilometer-collector @@ -1,5 +1,5 @@ -python-pymongo -mongodb-server +python-pymongo #NOPRIME +mongodb-server #NOPRIME libnspr4-dev pkg-config libxml2-dev diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector index c91bac36a2..9cf580d22d 100644 --- a/files/rpms/ceilometer-collector +++ b/files/rpms/ceilometer-collector @@ -1,4 +1,4 @@ selinux-policy-targeted -mongodb-server -pymongo +mongodb-server #NOPRIME +pymongo # NOPRIME mongodb # NOPRIME diff --git a/lib/ceilometer b/lib/ceilometer index b0899e2f24..6aaddcefad 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -106,7 +106,9 @@ create_ceilometer_accounts() { # cleanup_ceilometer() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_ceilometer { - mongo ceilometer --eval "db.dropDatabase();" + if [ "$CEILOMETER_BACKEND" != 'mysql' ] && [ "$CEILOMETER_BACKEND" != 'postgresql' ] ; then + mongo ceilometer --eval "db.dropDatabase();" + fi } # configure_ceilometerclient() - Set config files, create data dirs, etc @@ -164,14 +166,27 @@ function configure_ceilometer { } function configure_mongodb { + # server package is the same on all + local packages=mongodb-server + + if is_fedora; then + # mongodb client + python bindings + packages="${packages} mongodb pymongo" + else + packages="${packages} python-pymongo" + fi + + install_package ${packages} + if is_fedora; then - # install mongodb client - install_package mongodb # ensure smallfiles selected to minimize freespace requirements sudo 
sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod restart_service mongod fi + + # give mongodb time to start-up + sleep 5 } # init_ceilometer() - Initialize etc. From ccb3d10e04f7be773daf1bddd0bc2bff024ce6f4 Mon Sep 17 00:00:00 2001 From: Newell Jensen Date: Mon, 10 Mar 2014 14:28:52 -0700 Subject: [PATCH 0579/4438] Makes error message easier to understand. If the host ip address is indeterminate while executing stack.sh, an error message is displayed. This error message could be a source of confusion since it references localrc, which is depreciated. This patch makes the error message clearer and easier to understand. It does this by taking out the reference to localrc. It also points the user towards local.conf where there are suggestions on how to set HOST_IP. Change-Id: I41f14a2de85449d2a08ab7eb2849844a1087b147 Closes-Bug: #1290556 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 148ce04e28..817da26a8e 100755 --- a/stack.sh +++ b/stack.sh @@ -289,7 +289,7 @@ FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} HOST_IP=$(get_default_host_ip $FIXED_RANGE $FLOATING_RANGE "$HOST_IP_IFACE" "$HOST_IP") if [ "$HOST_IP" == "" ]; then - die $LINENO "Could not determine host ip address. Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted" + die $LINENO "Could not determine host ip address. See local.conf for suggestions on setting HOST_IP." fi # Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints. From c20bab89c47e02d88fb314d4d0a8dbfc73fca20e Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Tue, 11 Mar 2014 11:38:24 +0100 Subject: [PATCH 0580/4438] Use the python-pyOpenSSL package openSUSE Recent pyOpenSSL releases when installed from pip depend on cryptography>=0.2.1, which itself depends on cffi>=0.8. That is conflicting with the python-cffi (0.7.2) package on openSUSE-13.1 which is required by the installed python-xattr. 
Change-Id: I721ce5288d150a3b01fb2558f7ca86028d734138 --- files/rpms-suse/glance | 1 + 1 file changed, 1 insertion(+) diff --git a/files/rpms-suse/glance b/files/rpms-suse/glance index dd68ac08c8..d9844e9bb4 100644 --- a/files/rpms-suse/glance +++ b/files/rpms-suse/glance @@ -8,5 +8,6 @@ python-devel python-eventlet python-greenlet python-iso8601 +python-pyOpenSSL python-wsgiref python-xattr From 3b1f2e4e885559957a939f8a260b4cff9938bc80 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Mon, 24 Feb 2014 20:30:07 +0900 Subject: [PATCH 0581/4438] Fix inverted conditionals in setup_develop This fixes regressions introduced by: Change-Id: Ic97e68348f46245b271567893b447fcedbd7bd6e ("Handle non-zero exit code from git diff") Change-Id: I053a292c287f3035eef37db2264eda06a170f9bc Closes-Bug: 1287513 --- functions-common | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index ed3d8832fd..90cd3dfa72 100644 --- a/functions-common +++ b/functions-common @@ -1248,7 +1248,7 @@ function setup_develop { # ``errexit`` requires us to trap the exit code when the repo is changed local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed") - if [[ $update_requirements = "changed" ]]; then + if [[ $update_requirements != "changed" ]]; then (cd $REQUIREMENTS_DIR; \ $SUDO_CMD python update.py $project_dir) fi @@ -1264,7 +1264,7 @@ function setup_develop { # a variable that tells us whether or not we should UNDO the requirements # changes (this will be set to False in the OpenStack ci gate) if [ $UNDO_REQUIREMENTS = "True" ]; then - if [[ $update_requirements = "changed" ]]; then + if [[ $update_requirements != "changed" ]]; then (cd $project_dir && git reset --hard) fi fi From dd304603e011160f7f796ec4af7dcaf50008372c Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 11 Mar 2014 16:38:57 -0400 Subject: [PATCH 0582/4438] put libvirt debug behind a flag only turn on the libvirt debugging if we 
really need it, which we could control in the gate via devstack-gate. Change-Id: I5e6d41d5333357608ab6a614610c060400f70a10 --- lib/nova_plugins/hypervisor-libvirt | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 26880e5850..5a51f33808 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -25,6 +25,8 @@ set +o xtrace # File injection is disabled by default in Nova. This will turn it back on. ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False} +# if we should turn on massive libvirt debugging +DEBUG_LIBVIRT=$(trueorfalse False $DEBUG_LIBVIRT) # Entry Points @@ -104,13 +106,15 @@ EOF add_user_to_group $STACK_USER $LIBVIRT_GROUP # Enable server side traces for libvirtd - local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" - local log_outputs="1:file:/var/log/libvirt/libvirtd.log" - if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then - echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf - fi - if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then - echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf + if [[ "$DEBUG_LIBVIRT" = "True" ]] ; then + local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" + local log_outputs="1:file:/var/log/libvirt/libvirtd.log" + if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then + echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + if ! 
grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then + echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi fi # libvirt detects various settings on startup, as we potentially changed From d78c4057d4ae53a994eefb4d4b0ee01a9365e5d5 Mon Sep 17 00:00:00 2001 From: Hemanth Ravi Date: Sun, 26 Jan 2014 17:30:11 -0800 Subject: [PATCH 0583/4438] Install script for One Convergence Neutron plugin. Change-Id: I1dcc625a7c986e7533820b01af9eee5b8addcffe Implements: install for blueprint oc-nvsd-neutron-plugin --- lib/neutron_plugins/oneconvergence | 76 ++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 lib/neutron_plugins/oneconvergence diff --git a/lib/neutron_plugins/oneconvergence b/lib/neutron_plugins/oneconvergence new file mode 100644 index 0000000000..0aebff629c --- /dev/null +++ b/lib/neutron_plugins/oneconvergence @@ -0,0 +1,76 @@ +# Neutron One Convergence plugin +# --------------------------- +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/neutron_plugins/ovs_base + +Q_L3_ENABLED=true +Q_L3_ROUTER_PER_TENANT=true +Q_USE_NAMESPACE=true + +function neutron_plugin_install_agent_packages { + _neutron_ovs_base_install_agent_packages +} +# Configure common parameters +function neutron_plugin_configure_common { + + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/oneconvergence + Q_PLUGIN_CONF_FILENAME=nvsdplugin.ini + Q_PLUGIN_CLASS="neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2" + Q_DB_NAME='oc_nvsd_neutron' +} + +# Configure plugin specific information +function neutron_plugin_configure_service { + iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_ip $NVSD_IP + iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_port $NVSD_PORT + iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_user $NVSD_USER + iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_passwd $NVSD_PASSWD +} + +function neutron_plugin_configure_debug_command { + _neutron_ovs_base_configure_debug_command +} + +function 
neutron_plugin_setup_interface_driver { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver +} + +function has_neutron_plugin_security_group { + # 1 means False here + return 0 +} + +function setup_integration_bridge { + _neutron_ovs_base_setup_bridge $OVS_BRIDGE +} + +function neutron_plugin_configure_dhcp_agent { + setup_integration_bridge + iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport +} + +function neutron_plugin_configure_l3_agent { + _neutron_ovs_base_configure_l3_agent + iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport +} + +function neutron_plugin_configure_plugin_agent { + + AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nvsd-agent" + + _neutron_ovs_base_configure_firewall_driver +} + +function neutron_plugin_create_nova_conf { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + if ( is_service_enabled n-cpu && ! ( is_service_enabled q-dhcp )) ; then + setup_integration_bridge + fi +} + +# Restore xtrace +$MY_XTRACE From 7d4c7e09b4882077471c3b2cb097c237c2016f96 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 12 Mar 2014 08:05:08 -0400 Subject: [PATCH 0584/4438] remove docker from devstack with I1c9bea2fdeebc4199c4f7d8fca4580a6fb7fed5b nova removed docker from it's driver tree. We shouldn't have driver support inside of devstack that's not part of upstream projects (this has been a line we've been pretty clear on with Neutron drivers in the past). Remove docker driver accordingly. 
Change-Id: Ib91d415ea1616d99a5c5e7bc3b9015392fda5847 --- README.md | 6 +- exercises/boot_from_volume.sh | 3 - exercises/euca.sh | 3 - exercises/floating_ips.sh | 3 - exercises/sec_groups.sh | 3 - exercises/volumes.sh | 3 - lib/nova_plugins/hypervisor-docker | 132 ----------------------------- stackrc | 3 - tools/docker/README.md | 13 --- tools/docker/install_docker.sh | 68 --------------- 10 files changed, 1 insertion(+), 236 deletions(-) delete mode 100644 lib/nova_plugins/hypervisor-docker delete mode 100644 tools/docker/README.md delete mode 100755 tools/docker/install_docker.sh diff --git a/README.md b/README.md index 9914b1ed69..a0f5b2689d 100644 --- a/README.md +++ b/README.md @@ -73,7 +73,7 @@ does not run if started as root. This is a recent change (Oct 2013) from the previous behaviour of automatically creating a ``stack`` user. Automatically creating user accounts is not the right response to running as root, so -that bit is now an explicit step using ``tools/create-stack-user.sh``. +that bit is now an explicit step using ``tools/create-stack-user.sh``. Run that (as root!) or just check it out to see what DevStack's expectations are for the account it runs under. Many people simply use their usual login (the default 'ubuntu' login on a UEC image @@ -253,10 +253,6 @@ If tempest has been successfully configured, a basic set of smoke tests can be r If you would like to use Xenserver as the hypervisor, please refer to the instructions in `./tools/xen/README.md`. -# DevStack on Docker - -If you would like to use Docker as the hypervisor, please refer to the instructions in `./tools/docker/README.md`. 
- # Additional Projects DevStack has a hook mechanism to call out to a dispatch script at specific diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index f679669eea..dff8e7a632 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -44,9 +44,6 @@ source $TOP_DIR/exerciserc # the exercise is skipped is_service_enabled cinder || exit 55 -# Also skip if the hypervisor is Docker -[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 - # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/exercises/euca.sh b/exercises/euca.sh index ad852a4f79..3768b56d4e 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -40,9 +40,6 @@ source $TOP_DIR/exerciserc # the exercise is skipped is_service_enabled n-api || exit 55 -# Skip if the hypervisor is Docker -[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 - # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 8b7b96197e..1416d4dc6a 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -40,9 +40,6 @@ source $TOP_DIR/exerciserc # the exercise is skipped is_service_enabled n-api || exit 55 -# Skip if the hypervisor is Docker -[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 - # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh index d71a1e0755..5f8b0a4d5d 100755 --- a/exercises/sec_groups.sh +++ b/exercises/sec_groups.sh @@ -37,9 +37,6 @@ source $TOP_DIR/exerciserc # the exercise is skipped is_service_enabled n-api || exit 55 -# Skip if the hypervisor is Docker -[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 - # Testing Security Groups # ======================= diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 83d25c779c..0d556df9e7 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -41,9 +41,6 @@ source $TOP_DIR/exerciserc 
# exercise is skipped. is_service_enabled cinder || exit 55 -# Also skip if the hypervisor is Docker -[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 - # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker deleted file mode 100644 index fd3c4fefc8..0000000000 --- a/lib/nova_plugins/hypervisor-docker +++ /dev/null @@ -1,132 +0,0 @@ -# lib/nova_plugins/docker -# Configure the Docker hypervisor - -# Enable with: -# -# VIRT_DRIVER=docker - -# Dependencies: -# -# - ``functions`` file -# - ``nova`` and ``glance`` configurations - -# install_nova_hypervisor - install any external requirements -# configure_nova_hypervisor - make configuration changes, including those to other services -# start_nova_hypervisor - start any external services -# stop_nova_hypervisor - stop any external services -# cleanup_nova_hypervisor - remove transient data and cache - -# Save trace setting -MY_XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default directories -DOCKER_DIR=$DEST/docker - -DOCKER_UNIX_SOCKET=/var/run/docker.sock -DOCKER_PID_FILE=/var/run/docker.pid -DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042} - -DOCKER_IMAGE=${DOCKER_IMAGE:-cirros:latest} -DOCKER_IMAGE_NAME=$DEFAULT_IMAGE_NAME -DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-registry:latest} -DOCKER_REGISTRY_IMAGE_NAME=registry -DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME} - -DOCKER_APT_REPO=${DOCKER_APT_REPO:-https://get.docker.io/ubuntu} - - -# Entry Points -# ------------ - -# clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor { - stop_service docker - - # Clean out work area - sudo rm -rf /var/lib/docker -} - -# configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor { - iniset $NOVA_CONF DEFAULT compute_driver docker.DockerDriver - iniset 
$GLANCE_API_CONF DEFAULT container_formats ami,ari,aki,bare,ovf,docker -} - -# is_docker_running - Return 0 (true) if Docker is running, otherwise 1 -function is_docker_running { - local docker_pid - if [ -f "$DOCKER_PID_FILE" ]; then - docker_pid=$(cat "$DOCKER_PID_FILE") - fi - if [[ -z "$docker_pid" ]] || ! ps -p "$docker_pid" | grep [d]ocker; then - return 1 - fi - return 0 -} - -# install_nova_hypervisor() - Install external components -function install_nova_hypervisor { - # So far this is Ubuntu only - if ! is_ubuntu; then - die $LINENO "Docker is only supported on Ubuntu at this time" - fi - - # Make sure Docker is installed - if ! is_package_installed lxc-docker; then - die $LINENO "Docker is not installed. Please run tools/docker/install_docker.sh" - fi - - if ! (is_docker_running); then - die $LINENO "Docker not running" - fi -} - -# start_nova_hypervisor - Start any required external services -function start_nova_hypervisor { - if ! (is_docker_running); then - die $LINENO "Docker not running" - fi - - # Start the Docker registry container - docker run -d -p ${DOCKER_REGISTRY_PORT}:5000 \ - -e SETTINGS_FLAVOR=openstack -e OS_USERNAME=${OS_USERNAME} \ - -e OS_PASSWORD=${OS_PASSWORD} -e OS_TENANT_NAME=${OS_TENANT_NAME} \ - -e OS_GLANCE_URL="${SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" \ - -e OS_AUTH_URL=${OS_AUTH_URL} \ - $DOCKER_REGISTRY_IMAGE_NAME ./docker-registry/run.sh - - echo "Waiting for docker registry to start..." - DOCKER_REGISTRY=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT} - if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl -s $DOCKER_REGISTRY; do sleep 1; done"; then - die $LINENO "docker-registry did not start" - fi - - # Tag image if not already tagged - if ! docker images | grep $DOCKER_REPOSITORY_NAME; then - docker tag $DOCKER_IMAGE_NAME $DOCKER_REPOSITORY_NAME - fi - - # Make sure we copied the image in Glance - if ! 
(glance image-show "$DOCKER_IMAGE"); then - docker push $DOCKER_REPOSITORY_NAME - fi -} - -# stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor { - # Stop the docker registry container - docker kill $(docker ps | grep docker-registry | cut -d' ' -f1) -} - - -# Restore xtrace -$MY_XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/stackrc b/stackrc index 6bb6f37195..756ec275dc 100644 --- a/stackrc +++ b/stackrc @@ -320,9 +320,6 @@ case "$VIRT_DRIVER" in openvz) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64} IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz"};; - docker) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros} - IMAGE_URLS=${IMAGE_URLS:-};; libvirt) case "$LIBVIRT_TYPE" in lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc diff --git a/tools/docker/README.md b/tools/docker/README.md deleted file mode 100644 index 976111f750..0000000000 --- a/tools/docker/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# DevStack on Docker - -Using Docker as Nova's hypervisor requries two steps: - -* Configure DevStack by adding the following to `localrc`:: - - VIRT_DRIVER=docker - -* Download and install the Docker service and images:: - - tools/docker/install_docker.sh - -After this, `stack.sh` should run as normal. 
diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh deleted file mode 100755 index 27c8c8210b..0000000000 --- a/tools/docker/install_docker.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env bash - -# **install_docker.sh** - Do the initial Docker installation and configuration - -# install_docker.sh -# -# Install docker package and images -# * downloads a base busybox image and a glance registry image if necessary -# * install the images in Docker's image cache - - -# Keep track of the current directory -SCRIPT_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $SCRIPT_DIR/../..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Load local configuration -source $TOP_DIR/stackrc - -FILES=$TOP_DIR/files - -# Get our defaults -source $TOP_DIR/lib/nova_plugins/hypervisor-docker - -SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} - - -# Install Docker Service -# ====================== - -if is_fedora; then - install_package docker-io socat -else - # Stop the auto-repo updates and do it when required here - NO_UPDATE_REPOS=True - - # Set up home repo - curl https://get.docker.io/gpg | sudo apt-key add - - install_package python-software-properties && \ - sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list" - apt_get update - install_package --force-yes lxc-docker socat -fi - -# Start the daemon - restart just in case the package ever auto-starts... -restart_service docker - -echo "Waiting for docker daemon to start..." -DOCKER_GROUP=$(groups | cut -d' ' -f1) -CONFIGURE_CMD="while ! /bin/echo -e 'GET /v1.3/version HTTP/1.0\n\n' | socat - unix-connect:$DOCKER_UNIX_SOCKET 2>/dev/null | grep -q '200 OK'; do - # Set the right group on docker unix socket before retrying - sudo chgrp $DOCKER_GROUP $DOCKER_UNIX_SOCKET - sudo chmod g+rw $DOCKER_UNIX_SOCKET - sleep 1 -done" -if ! 
timeout $SERVICE_TIMEOUT sh -c "$CONFIGURE_CMD"; then - die $LINENO "docker did not start" -fi - -# Get guest container image -docker pull $DOCKER_IMAGE -docker tag $DOCKER_IMAGE $DOCKER_IMAGE_NAME - -# Get docker-registry image -docker pull $DOCKER_REGISTRY_IMAGE -docker tag $DOCKER_REGISTRY_IMAGE $DOCKER_REGISTRY_IMAGE_NAME From 1749106c3abb17ee7cf30eb69bc9b744f3fc5a95 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Wed, 12 Mar 2014 14:38:25 +0100 Subject: [PATCH 0585/4438] Remove unused package dependencies * /sbin/vconfig command is not used by either nova or neutron. * Now the AMQP carrot is not used, not even optionally by the oslo.messaging. * python-gfalgs just referenced as a similar configuration style, by neutron. Change-Id: Idde5446e47e7da1dd204ea518ab816e2cce77c7d --- files/apts/nova | 2 -- files/rpms-suse/nova | 2 -- files/rpms/neutron | 1 - files/rpms/nova | 3 --- 4 files changed, 8 deletions(-) diff --git a/files/apts/nova b/files/apts/nova index ae925c3293..dfb25c7f37 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -25,7 +25,6 @@ socat # used by ajaxterm python-mox python-paste python-migrate -python-gflags python-greenlet python-libvirt # NOPRIME python-libxml2 @@ -34,7 +33,6 @@ python-numpy # used by websockify for spice console python-pastedeploy python-eventlet python-cheetah -python-carrot python-tempita python-sqlalchemy python-suds diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova index ee4917d702..c3c878fb4a 100644 --- a/files/rpms-suse/nova +++ b/files/rpms-suse/nova @@ -24,7 +24,6 @@ python-Routes python-SQLAlchemy python-Tempita python-boto -python-carrot python-cheetah python-eventlet python-feedparser @@ -37,7 +36,6 @@ python-mox python-mysql python-numpy # needed by websockify for spice console python-paramiko -python-python-gflags python-sqlalchemy-migrate python-suds python-xattr # needed for glance which is needed for nova --- this shouldn't be here diff --git a/files/rpms/neutron b/files/rpms/neutron index 
42d7f68d37..e5c901be37 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -21,4 +21,3 @@ rabbitmq-server # NOPRIME qpid-cpp-server # NOPRIME sqlite sudo -vconfig diff --git a/files/rpms/nova b/files/rpms/nova index a607d925e1..61b0e9a0d1 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -17,11 +17,9 @@ mysql-server # NOPRIME parted polkit python-boto -python-carrot python-cheetah python-eventlet python-feedparser -python-gflags python-greenlet python-iso8601 python-kombu @@ -42,4 +40,3 @@ rabbitmq-server # NOPRIME qpid-cpp-server # NOPRIME sqlite sudo -vconfig From 64bd01652e6fd7c593498b1fd2bf50bfdf64ce40 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 12 Mar 2014 13:04:22 -0400 Subject: [PATCH 0586/4438] make git_clone safer the ensures that if the function returns early, we return to a sane directory, and not hang out somewhere that a future git call might modify a directory in a weird way. This is especially important in the case of stable branches where were are hopping between stable for servers and master for clients. 
Change-Id: Ib8ebbc23b1813bc1bfb31d0a079f1b882135bd39 --- functions-common | 3 +++ 1 file changed, 3 insertions(+) diff --git a/functions-common b/functions-common index 90cd3dfa72..c6fd5c7163 100644 --- a/functions-common +++ b/functions-common @@ -517,12 +517,14 @@ function git_clone { GIT_DEST=$2 GIT_REF=$3 RECLONE=$(trueorfalse False $RECLONE) + local orig_dir=`pwd` if [[ "$OFFLINE" = "True" ]]; then echo "Running in offline mode, clones already exist" # print out the results so we know what change was used in the logs cd $GIT_DEST git show --oneline | head -1 + cd $orig_dir return fi @@ -572,6 +574,7 @@ function git_clone { # print out the results so we know what change was used in the logs cd $GIT_DEST git show --oneline | head -1 + cd $orig_dir } # git can sometimes get itself infinitely stuck with transient network From 767b5a45b7c6a91a449e0cb41baf16221a7de5e1 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 12 Mar 2014 10:33:15 -0700 Subject: [PATCH 0587/4438] Split up stop_nova to match start_nova Split stop_nova into: stop_nova_compute and stop_nova_rest. This is needed to support the partial-ncpu grenade test where we want to stop everything but nova_compute. Change-Id: I6a21821277e56897d705ca5746806e2211632d12 --- lib/nova | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/lib/nova b/lib/nova index 55103e8dcc..15f56d336b 100644 --- a/lib/nova +++ b/lib/nova @@ -715,17 +715,25 @@ function start_nova { start_nova_rest } -# stop_nova() - Stop running processes (non-screen) -function stop_nova { +function stop_nova_compute { + if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + stop_nova_hypervisor + fi +} + +function stop_nova_rest { # Kill the nova screen windows # Some services are listed here twice since more than one instance # of a service may be running in certain configs. 
for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do screen_stop $serv done - if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then - stop_nova_hypervisor - fi +} + +# stop_nova() - Stop running processes (non-screen) +function stop_nova { + stop_nova_rest + stop_nova_compute } From 9c6d2840fdb67eb7af34be241bdb2fbebaf67c87 Mon Sep 17 00:00:00 2001 From: Sreeram Yerrapragada Date: Mon, 10 Mar 2014 14:12:58 -0700 Subject: [PATCH 0588/4438] fix failing wget statements under -o errexit in vmdk upload routine Fix the case when uploaded image has no descriptor. Refactored the code a bit Tested: 1. monithic Sparse 2. monolithic flat 2.1 flat file name mentioned in descriptor file 2.1 flat file name not mentioned in descriptor file 3. descriptor header not found in the file 3.1 image file name is *-flat, download descriptor 3.2 image file name does not end with *-flat 4. file name contains all image properties Change-Id: I0df9be5c2a1b9ed53cdb22d5cd40b94e56c48f37 Closes-bug: #1289664 --- functions | 63 ++++++++++++++++++++----------------------------------- 1 file changed, 23 insertions(+), 40 deletions(-) diff --git a/functions b/functions index 1d30922916..e0d2b01d0c 100644 --- a/functions +++ b/functions @@ -122,7 +122,7 @@ function upload_image { flat_fname="$(head -25 $IMAGE | { grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE || true; })" flat_fname="${flat_fname#*\"}" flat_fname="${flat_fname%?}" - if [[ -z "$flat_name" ]]; then + if [[ -z "$flat_fname" ]]; then flat_fname="$IMAGE_NAME-flat.vmdk" fi path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` @@ -133,27 +133,16 @@ function upload_image { if [[ ! -f $FILES/$flat_fname || \ "$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then wget -c $flat_url -O $FILES/$flat_fname - if [[ $? 
-ne 0 ]]; then - echo "Flat disk not found: $flat_url" - flat_found=false - fi - fi - if $flat_found; then - IMAGE="$FILES/${flat_fname}" fi + IMAGE="$FILES/${flat_fname}" else IMAGE=$(echo $flat_url | sed "s/^file:\/\///g") if [[ ! -f $IMAGE || "$(stat -c "%s" $IMAGE)" == "0" ]]; then echo "Flat disk not found: $flat_url" - flat_found=false + return 1 fi - if ! $flat_found; then - IMAGE=$(echo $image_url | sed "s/^file:\/\///g") - fi - fi - if $flat_found; then - IMAGE_NAME="${flat_fname}" fi + IMAGE_NAME="${flat_fname}" vmdk_disktype="preallocated" elif [[ "$vmdk_create_type" = "streamOptimized" ]]; then vmdk_disktype="streamOptimized" @@ -163,33 +152,27 @@ function upload_image { if [[ ${IMAGE_NAME: -5} != "-flat" ]]; then warn $LINENO "Expected filename suffix: '-flat'."` `" Filename provided: ${IMAGE_NAME}" - fi - - descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk" - path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` - flat_path="${image_url:0:$path_len}" - descriptor_url=$flat_path$descriptor_fname - warn $LINENO "$descriptor_data_pair_msg"` - `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" - if [[ $flat_path != file* ]]; then - if [[ ! -f $FILES/$descriptor_fname || \ - "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then - wget -c $descriptor_url -O $FILES/$descriptor_fname - if [[ $? -ne 0 ]]; then - warn $LINENO "Descriptor not found $descriptor_url" - descriptor_found=false - fi - fi - descriptor_url="$FILES/$descriptor_fname" else - descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g") - if [[ ! 
-f $descriptor_url || \ - "$(stat -c "%s" $descriptor_url)" == "0" ]]; then - warn $LINENO "Descriptor not found $descriptor_url" - descriptor_found=false + descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk" + path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` + flat_path="${image_url:0:$path_len}" + descriptor_url=$flat_path$descriptor_fname + warn $LINENO "$descriptor_data_pair_msg"` + `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" + if [[ $flat_path != file* ]]; then + if [[ ! -f $FILES/$descriptor_fname || \ + "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then + wget -c $descriptor_url -O $FILES/$descriptor_fname + fi + descriptor_url="$FILES/$descriptor_fname" + else + descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g") + if [[ ! -f $descriptor_url || \ + "$(stat -c "%s" $descriptor_url)" == "0" ]]; then + echo "Descriptor not found: $descriptor_url" + return 1 + fi fi - fi - if $descriptor_found; then vmdk_adapter_type="$(head -25 $descriptor_url | { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })" vmdk_adapter_type="${vmdk_adapter_type#*\"}" vmdk_adapter_type="${vmdk_adapter_type%?}" From 7ff8443e46c94562822895b86b24122bc7474cfd Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Mon, 10 Mar 2014 20:04:51 +0400 Subject: [PATCH 0589/4438] Rename all Savanna usages to Sahara There are several backward compatibility nits. 
Change-Id: I93cac543375896602d158860cc557f86e41bcb63 --- exercises/{savanna.sh => sahara.sh} | 8 +- extras.d/70-sahara.sh | 37 ++++++ extras.d/70-savanna.sh | 37 ------ lib/sahara | 177 ++++++++++++++++++++++++++++ lib/sahara-dashboard | 72 +++++++++++ lib/savanna | 173 --------------------------- lib/savanna-dashboard | 72 ----------- 7 files changed, 290 insertions(+), 286 deletions(-) rename exercises/{savanna.sh => sahara.sh} (88%) create mode 100644 extras.d/70-sahara.sh delete mode 100644 extras.d/70-savanna.sh create mode 100644 lib/sahara create mode 100644 lib/sahara-dashboard delete mode 100644 lib/savanna delete mode 100644 lib/savanna-dashboard diff --git a/exercises/savanna.sh b/exercises/sahara.sh similarity index 88% rename from exercises/savanna.sh rename to exercises/sahara.sh index fc3f9760e5..867920ed31 100755 --- a/exercises/savanna.sh +++ b/exercises/sahara.sh @@ -1,8 +1,8 @@ #!/usr/bin/env bash -# **savanna.sh** +# **sahara.sh** -# Sanity check that Savanna started if enabled +# Sanity check that Sahara started if enabled echo "*********************************************************************" echo "Begin DevStack Exercise: $0" @@ -33,9 +33,9 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc -is_service_enabled savanna || exit 55 +is_service_enabled sahara || exit 55 -curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Savanna API not functioning!" +curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!" 
set +o xtrace echo "*********************************************************************" diff --git a/extras.d/70-sahara.sh b/extras.d/70-sahara.sh new file mode 100644 index 0000000000..80e07ff7b9 --- /dev/null +++ b/extras.d/70-sahara.sh @@ -0,0 +1,37 @@ +# sahara.sh - DevStack extras script to install Sahara + +if is_service_enabled sahara; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/sahara + source $TOP_DIR/lib/sahara-dashboard + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing sahara" + install_sahara + cleanup_sahara + if is_service_enabled horizon; then + install_sahara_dashboard + fi + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + echo_summary "Configuring sahara" + configure_sahara + create_sahara_accounts + if is_service_enabled horizon; then + configure_sahara_dashboard + fi + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + echo_summary "Initializing sahara" + start_sahara + fi + + if [[ "$1" == "unstack" ]]; then + stop_sahara + if is_service_enabled horizon; then + cleanup_sahara_dashboard + fi + fi + + if [[ "$1" == "clean" ]]; then + cleanup_sahara + fi +fi diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh deleted file mode 100644 index edc1376deb..0000000000 --- a/extras.d/70-savanna.sh +++ /dev/null @@ -1,37 +0,0 @@ -# savanna.sh - DevStack extras script to install Savanna - -if is_service_enabled savanna; then - if [[ "$1" == "source" ]]; then - # Initial source - source $TOP_DIR/lib/savanna - source $TOP_DIR/lib/savanna-dashboard - elif [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing Savanna" - install_savanna - cleanup_savanna - if is_service_enabled horizon; then - install_savanna_dashboard - fi - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring Savanna" - configure_savanna - create_savanna_accounts - if is_service_enabled horizon; then - configure_savanna_dashboard - fi - elif [[ 
"$1" == "stack" && "$2" == "extra" ]]; then - echo_summary "Initializing Savanna" - start_savanna - fi - - if [[ "$1" == "unstack" ]]; then - stop_savanna - if is_service_enabled horizon; then - cleanup_savanna_dashboard - fi - fi - - if [[ "$1" == "clean" ]]; then - cleanup_savanna - fi -fi diff --git a/lib/sahara b/lib/sahara new file mode 100644 index 0000000000..4cb04ecd3a --- /dev/null +++ b/lib/sahara @@ -0,0 +1,177 @@ +# lib/sahara + +# Dependencies: +# ``functions`` file +# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# install_sahara +# configure_sahara +# start_sahara +# stop_sahara +# cleanup_sahara + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default repos +SAHARA_REPO=${SAHARA_REPO:-${GIT_BASE}/openstack/sahara.git} +SAHARA_BRANCH=${SAHARA_BRANCH:-master} + +# Set up default directories +SAHARA_DIR=$DEST/sahara +SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara} +SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf +SAHARA_DEBUG=${SAHARA_DEBUG:-True} + +SAHARA_SERVICE_HOST=${SAHARA_SERVICE_HOST:-$SERVICE_HOST} +SAHARA_SERVICE_PORT=${SAHARA_SERVICE_PORT:-8386} +SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} + +SAHARA_AUTH_CACHE_DIR=${SAHARA_AUTH_CACHE_DIR:-/var/cache/sahara} + +# Support entry points installation of console scripts +if [[ -d $SAHARA_DIR/bin ]]; then + SAHARA_BIN_DIR=$SAHARA_DIR/bin +else + SAHARA_BIN_DIR=$(get_python_exec_prefix) +fi + +# Tell Tempest this project is present +TEMPEST_SERVICES+=,sahara + +# For backward compatibility with current tests in Tempest +TEMPEST_SERVICES+=,savanna + + +# Functions +# --------- + +# create_sahara_accounts() - Set up common required sahara accounts +# +# Tenant User Roles +# ------------------------------ +# service sahara admin +function create_sahara_accounts { + + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print 
\$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") + + SAHARA_USER=$(openstack user create \ + sahara \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email sahara@example.com \ + | grep " id " | get_field 2) + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $SAHARA_USER + + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + SAHARA_SERVICE=$(openstack service create \ + sahara \ + --type=data_processing \ + --description="Sahara Data Processing" \ + | grep " id " | get_field 2) + openstack endpoint create \ + $SAHARA_SERVICE \ + --region RegionOne \ + --publicurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ + --adminurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ + --internalurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" + fi +} + +# cleanup_sahara() - Remove residual data files, anything left over from +# previous runs that would need to clean up. +function cleanup_sahara { + + # Cleanup auth cache dir + sudo rm -rf $SAHARA_AUTH_CACHE_DIR +} + +# configure_sahara() - Set config files, create data dirs, etc +function configure_sahara { + + if [[ ! -d $SAHARA_CONF_DIR ]]; then + sudo mkdir -p $SAHARA_CONF_DIR + fi + sudo chown $STACK_USER $SAHARA_CONF_DIR + + # Copy over sahara configuration file and configure common parameters. 
+ # TODO(slukjanov): rename when sahara internals will be updated + cp $SAHARA_DIR/etc/savanna/savanna.conf.sample $SAHARA_CONF_FILE + + # Create auth cache dir + sudo mkdir -p $SAHARA_AUTH_CACHE_DIR + sudo chown $STACK_USER $SAHARA_AUTH_CACHE_DIR + rm -rf $SAHARA_AUTH_CACHE_DIR/* + + # Set obsolete keystone auth configs for backward compatibility + iniset $SAHARA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST + iniset $SAHARA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT + iniset $SAHARA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL + iniset $SAHARA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD + iniset $SAHARA_CONF_FILE DEFAULT os_admin_username sahara + iniset $SAHARA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME + + # Set actual keystone auth configs + iniset $SAHARA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + iniset $SAHARA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $SAHARA_CONF_FILE keystone_authtoken admin_user sahara + iniset $SAHARA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $SAHARA_CONF_FILE keystone_authtoken signing_dir $SAHARA_AUTH_CACHE_DIR + iniset $SAHARA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA + + iniset $SAHARA_CONF_FILE DEFAULT debug $SAHARA_DEBUG + + iniset $SAHARA_CONF_FILE database connection `database_connection_url sahara` + + if is_service_enabled neutron; then + iniset $SAHARA_CONF_FILE DEFAULT use_neutron true + iniset $SAHARA_CONF_FILE DEFAULT use_floating_ips true + fi + + if is_service_enabled heat; then + iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine heat + else + iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine direct + fi + + iniset $SAHARA_CONF_FILE DEFAULT use_syslog $SYSLOG + + recreate_database sahara utf8 + $SAHARA_BIN_DIR/sahara-db-manage --config-file $SAHARA_CONF_FILE upgrade head +} + +# install_sahara() - 
Collect source and prepare +function install_sahara { + git_clone $SAHARA_REPO $SAHARA_DIR $SAHARA_BRANCH + setup_develop $SAHARA_DIR +} + +# start_sahara() - Start running processes, including screen +function start_sahara { + screen_it sahara "cd $SAHARA_DIR && $SAHARA_BIN_DIR/sahara-api --config-file $SAHARA_CONF_FILE" +} + +# stop_sahara() - Stop running processes +function stop_sahara { + # Kill the Sahara screen windows + screen -S $SCREEN_NAME -p sahara -X kill +} + + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/sahara-dashboard b/lib/sahara-dashboard new file mode 100644 index 0000000000..a81df0f7a8 --- /dev/null +++ b/lib/sahara-dashboard @@ -0,0 +1,72 @@ +# lib/sahara-dashboard + +# Dependencies: +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# - ``SERVICE_HOST`` + +# ``stack.sh`` calls the entry points in this order: +# +# - install_sahara_dashboard +# - configure_sahara_dashboard +# - cleanup_sahara_dashboard + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/horizon + +# Defaults +# -------- + +# Set up default repos +SAHARA_DASHBOARD_REPO=${SAHARA_DASHBOARD_REPO:-${GIT_BASE}/openstack/sahara-dashboard.git} +SAHARA_DASHBOARD_BRANCH=${SAHARA_DASHBOARD_BRANCH:-master} + +SAHARA_PYTHONCLIENT_REPO=${SAHARA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-saharaclient.git} +SAHARA_PYTHONCLIENT_BRANCH=${SAHARA_PYTHONCLIENT_BRANCH:-master} + +# Set up default directories +SAHARA_DASHBOARD_DIR=$DEST/sahara-dashboard +SAHARA_PYTHONCLIENT_DIR=$DEST/python-saharaclient + +# Functions +# --------- + +function configure_sahara_dashboard { + + echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py + echo -e "HORIZON_CONFIG['dashboards'] += ('sahara',)" >> $HORIZON_DIR/openstack_dashboard/settings.py + echo -e "INSTALLED_APPS += ('saharadashboard',)" >> 
$HORIZON_DIR/openstack_dashboard/settings.py + + if is_service_enabled neutron; then + echo -e "SAHARA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py + fi +} + +# install_sahara_dashboard() - Collect source and prepare +function install_sahara_dashboard { + install_python_saharaclient + git_clone $SAHARA_DASHBOARD_REPO $SAHARA_DASHBOARD_DIR $SAHARA_DASHBOARD_BRANCH + setup_develop $SAHARA_DASHBOARD_DIR +} + +function install_python_saharaclient { + git_clone $SAHARA_PYTHONCLIENT_REPO $SAHARA_PYTHONCLIENT_DIR $SAHARA_PYTHONCLIENT_BRANCH + setup_develop $SAHARA_PYTHONCLIENT_DIR +} + +# Cleanup file settings.py from Sahara +function cleanup_sahara_dashboard { + sed -i '/sahara/d' $HORIZON_DIR/openstack_dashboard/settings.py +} + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: + diff --git a/lib/savanna b/lib/savanna deleted file mode 100644 index 2cb092c96c..0000000000 --- a/lib/savanna +++ /dev/null @@ -1,173 +0,0 @@ -# lib/savanna - -# Dependencies: -# ``functions`` file -# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined - -# ``stack.sh`` calls the entry points in this order: -# -# install_savanna -# configure_savanna -# start_savanna -# stop_savanna -# cleanup_savanna - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default repos -SAVANNA_REPO=${SAVANNA_REPO:-${GIT_BASE}/openstack/savanna.git} -SAVANNA_BRANCH=${SAVANNA_BRANCH:-master} - -# Set up default directories -SAVANNA_DIR=$DEST/savanna -SAVANNA_CONF_DIR=${SAVANNA_CONF_DIR:-/etc/savanna} -SAVANNA_CONF_FILE=${SAVANNA_CONF_DIR}/savanna.conf -SAVANNA_DEBUG=${SAVANNA_DEBUG:-True} - -SAVANNA_SERVICE_HOST=${SAVANNA_SERVICE_HOST:-$SERVICE_HOST} -SAVANNA_SERVICE_PORT=${SAVANNA_SERVICE_PORT:-8386} -SAVANNA_SERVICE_PROTOCOL=${SAVANNA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} - -SAVANNA_AUTH_CACHE_DIR=${SAVANNA_AUTH_CACHE_DIR:-/var/cache/savanna} - -# Support entry points installation of 
console scripts -if [[ -d $SAVANNA_DIR/bin ]]; then - SAVANNA_BIN_DIR=$SAVANNA_DIR/bin -else - SAVANNA_BIN_DIR=$(get_python_exec_prefix) -fi - -# Tell Tempest this project is present -TEMPEST_SERVICES+=,savanna - - -# Functions -# --------- - -# create_savanna_accounts() - Set up common required savanna accounts -# -# Tenant User Roles -# ------------------------------ -# service savanna admin -function create_savanna_accounts { - - SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") - - SAVANNA_USER=$(openstack user create \ - savanna \ - --password "$SERVICE_PASSWORD" \ - --project $SERVICE_TENANT \ - --email savanna@example.com \ - | grep " id " | get_field 2) - openstack role add \ - $ADMIN_ROLE \ - --project $SERVICE_TENANT \ - --user $SAVANNA_USER - - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - SAVANNA_SERVICE=$(openstack service create \ - savanna \ - --type=data_processing \ - --description="Savanna Data Processing" \ - | grep " id " | get_field 2) - openstack endpoint create \ - $SAVANNA_SERVICE \ - --region RegionOne \ - --publicurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ - --adminurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ - --internalurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" - fi -} - -# cleanup_savanna() - Remove residual data files, anything left over from -# previous runs that would need to clean up. -function cleanup_savanna { - - # Cleanup auth cache dir - sudo rm -rf $SAVANNA_AUTH_CACHE_DIR -} - -# configure_savanna() - Set config files, create data dirs, etc -function configure_savanna { - - if [[ ! 
-d $SAVANNA_CONF_DIR ]]; then - sudo mkdir -p $SAVANNA_CONF_DIR - fi - sudo chown $STACK_USER $SAVANNA_CONF_DIR - - # Copy over savanna configuration file and configure common parameters. - cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_FILE - - # Create auth cache dir - sudo mkdir -p $SAVANNA_AUTH_CACHE_DIR - sudo chown $STACK_USER $SAVANNA_AUTH_CACHE_DIR - rm -rf $SAVANNA_AUTH_CACHE_DIR/* - - # Set obsolete keystone auth configs for backward compatibility - iniset $SAVANNA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST - iniset $SAVANNA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT - iniset $SAVANNA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL - iniset $SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD - iniset $SAVANNA_CONF_FILE DEFAULT os_admin_username savanna - iniset $SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME - - # Set actual keystone auth configs - iniset $SAVANNA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ - iniset $SAVANNA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $SAVANNA_CONF_FILE keystone_authtoken admin_user savanna - iniset $SAVANNA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $SAVANNA_CONF_FILE keystone_authtoken signing_dir $SAVANNA_AUTH_CACHE_DIR - iniset $SAVANNA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA - - iniset $SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG - - iniset $SAVANNA_CONF_FILE database connection `database_connection_url savanna` - - if is_service_enabled neutron; then - iniset $SAVANNA_CONF_FILE DEFAULT use_neutron true - iniset $SAVANNA_CONF_FILE DEFAULT use_floating_ips true - fi - - if is_service_enabled heat; then - iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine heat - else - iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine savanna - fi - - iniset $SAVANNA_CONF_FILE DEFAULT use_syslog 
$SYSLOG - - recreate_database savanna utf8 - $SAVANNA_BIN_DIR/savanna-db-manage --config-file $SAVANNA_CONF_FILE upgrade head -} - -# install_savanna() - Collect source and prepare -function install_savanna { - git_clone $SAVANNA_REPO $SAVANNA_DIR $SAVANNA_BRANCH - setup_develop $SAVANNA_DIR -} - -# start_savanna() - Start running processes, including screen -function start_savanna { - screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api --config-file $SAVANNA_CONF_FILE" -} - -# stop_savanna() - Stop running processes -function stop_savanna { - # Kill the Savanna screen windows - screen -S $SCREEN_NAME -p savanna -X kill -} - - -# Restore xtrace -$XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard deleted file mode 100644 index 6fe15a3c81..0000000000 --- a/lib/savanna-dashboard +++ /dev/null @@ -1,72 +0,0 @@ -# lib/savanna-dashboard - -# Dependencies: -# -# - ``functions`` file -# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined -# - ``SERVICE_HOST`` - -# ``stack.sh`` calls the entry points in this order: -# -# - install_savanna_dashboard -# - configure_savanna_dashboard -# - cleanup_savanna_dashboard - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - -source $TOP_DIR/lib/horizon - -# Defaults -# -------- - -# Set up default repos -SAVANNA_DASHBOARD_REPO=${SAVANNA_DASHBOARD_REPO:-${GIT_BASE}/openstack/savanna-dashboard.git} -SAVANNA_DASHBOARD_BRANCH=${SAVANNA_DASHBOARD_BRANCH:-master} - -SAVANNA_PYTHONCLIENT_REPO=${SAVANNA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-savannaclient.git} -SAVANNA_PYTHONCLIENT_BRANCH=${SAVANNA_PYTHONCLIENT_BRANCH:-master} - -# Set up default directories -SAVANNA_DASHBOARD_DIR=$DEST/savanna-dashboard -SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient - -# Functions -# --------- - -function configure_savanna_dashboard { - - echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> 
$HORIZON_DIR/openstack_dashboard/local/local_settings.py - echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)" >> $HORIZON_DIR/openstack_dashboard/settings.py - echo -e "INSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py - - if is_service_enabled neutron; then - echo -e "SAVANNA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py - fi -} - -# install_savanna_dashboard() - Collect source and prepare -function install_savanna_dashboard { - install_python_savannaclient - git_clone $SAVANNA_DASHBOARD_REPO $SAVANNA_DASHBOARD_DIR $SAVANNA_DASHBOARD_BRANCH - setup_develop $SAVANNA_DASHBOARD_DIR -} - -function install_python_savannaclient { - git_clone $SAVANNA_PYTHONCLIENT_REPO $SAVANNA_PYTHONCLIENT_DIR $SAVANNA_PYTHONCLIENT_BRANCH - setup_develop $SAVANNA_PYTHONCLIENT_DIR -} - -# Cleanup file settings.py from Savanna -function cleanup_savanna_dashboard { - sed -i '/savanna/d' $HORIZON_DIR/openstack_dashboard/settings.py -} - -# Restore xtrace -$XTRACE - -# Local variables: -# mode: shell-script -# End: - From 51ebda6c8d37539473e463e8b24f27f21d798392 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Wed, 12 Mar 2014 22:26:12 +0400 Subject: [PATCH 0590/4438] Use sahara.conf.sample instead of old one Sahara internals was updated, now we can use correct conf sample. Change-Id: Ia8d99c2742785c3b5c724617a5dfc2880624a03f --- lib/sahara | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/sahara b/lib/sahara index 4cb04ecd3a..38b4ecd7e9 100644 --- a/lib/sahara +++ b/lib/sahara @@ -106,8 +106,7 @@ function configure_sahara { sudo chown $STACK_USER $SAHARA_CONF_DIR # Copy over sahara configuration file and configure common parameters. 
- # TODO(slukjanov): rename when sahara internals will be updated - cp $SAHARA_DIR/etc/savanna/savanna.conf.sample $SAHARA_CONF_FILE + cp $SAHARA_DIR/etc/sahara/sahara.conf.sample $SAHARA_CONF_FILE # Create auth cache dir sudo mkdir -p $SAHARA_AUTH_CACHE_DIR From 1a0c090057dde13fd3bb8ffcb84a923eb5952084 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 12 Mar 2014 14:59:50 -0500 Subject: [PATCH 0591/4438] Additional attempts to flosh stdout/stderr The logfile output is piped through awk to apply a timestamp and filter out all of the xtrace commands in the xtrace output. A while back we added fflush("") which is supposed to flush all open output files and pipes. It appears that gawk in precise is old enough that it may only flush stdout, so explicitly flush the logfile handle. Change-Id: If5198c2da2a3278eed8ae3d50c7ca5c15eac6d94 --- stack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/stack.sh b/stack.sh index e76a55c534..a16046474c 100755 --- a/stack.sh +++ b/stack.sh @@ -541,6 +541,7 @@ if [[ -n "$LOGFILE" ]]; then print print > logfile fflush("") + fflush(logfile) }' ) 2>&1 # Set up a second fd for output exec 6> >( tee "${SUMFILE}" ) From 26c5a2252e9b99e053616d262fb627c1716a2e4d Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 12 Mar 2014 18:37:37 -0400 Subject: [PATCH 0592/4438] change the vmdk to the one used in VMWare ci The debian image that defaults to being using with vmware is huge, and it turns out it's not actually used in VMWare ci so we don't really know if it's working. Instead use the vmdk that is used in VMWare ci, which we know will boot, as we get results everyday. 
Change-Id: I014746af293852525e2bd128c4d19f5889ecd55d --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 6bb6f37195..cff1e26209 100644 --- a/stackrc +++ b/stackrc @@ -335,7 +335,7 @@ case "$VIRT_DRIVER" in ;; vsphere) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-debian-2.6.32-i686} - IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/debian-2.6.32-i686.vmdk"};; + IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/cirros-0.3.0-i386-disk.vmdk"};; xenserver) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-disk} IMAGE_URLS=${IMAGE_URLS:-"https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz"};; From 7eb99343979921993dc361f71b5efd77e9130f78 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 6 Feb 2014 10:33:40 +0100 Subject: [PATCH 0593/4438] Setup the correct ec2 manifest path setup correctly the path to the ec2 boundled images. Change-Id: If3bce845e009a73c6b685976de3fa6d44b907bed --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index c74f00d1ab..a3df45e81c 100644 --- a/lib/tempest +++ b/lib/tempest @@ -310,6 +310,9 @@ function configure_tempest { iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" iniset $TEMPEST_CONFIG boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}" iniset $TEMPEST_CONFIG boto s3_materials_path "$BOTO_MATERIALS_PATH" + iniset $TEMPEST_CONFIG boto ari_manifest cirros-0.3.1-x86_64-initrd.manifest.xml + iniset $TEMPEST_CONFIG boto ami_manifest cirros-0.3.1-x86_64-blank.img.manifest.xml + iniset $TEMPEST_CONFIG boto aki_manifest cirros-0.3.1-x86_64-vmlinuz.manifest.xml iniset $TEMPEST_CONFIG boto instance_type "$boto_instance_type" iniset $TEMPEST_CONFIG boto http_socket_timeout 30 iniset $TEMPEST_CONFIG boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} From 0f73ff2c516cb9fdb6849f7feb19cd0cfde46852 Mon Sep 17 00:00:00 2001 From: 
Adam Gandelman Date: Thu, 13 Mar 2014 14:20:43 -0700 Subject: [PATCH 0594/4438] Move libvirt install + setup to functions-libvirt Moves installation and setup of libvirt to a common functions-libvirt, which can be used by other drivers in the future that may require cross-distro libvirt installation and config but are not using VIRT_DRIVER=libvirt (ie, Ironic). Change-Id: I4a9255c8b4bacd5acfde9b8061c9e537aeea592c --- lib/nova_plugins/functions-libvirt | 125 ++++++++++++++++++++++++++++ lib/nova_plugins/hypervisor-libvirt | 99 +--------------------- 2 files changed, 128 insertions(+), 96 deletions(-) create mode 100644 lib/nova_plugins/functions-libvirt diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt new file mode 100644 index 0000000000..adffe010ee --- /dev/null +++ b/lib/nova_plugins/functions-libvirt @@ -0,0 +1,125 @@ +# lib/nova_plugins/functions-libvirt +# Common libvirt configuration functions + +# Dependencies: +# ``functions`` file +# ``STACK_USER`` has to be defined + +# Save trace setting +LV_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# ------- + +# if we should turn on massive libvirt debugging +DEBUG_LIBVIRT=$(trueorfalse False $DEBUG_LIBVIRT) + +# Installs required distro-specific libvirt packages. +function install_libvirt { + if is_ubuntu; then + install_package kvm + install_package libvirt-bin + install_package python-libvirt + install_package python-guestfs + elif is_fedora || is_suse; then + install_package kvm + install_package libvirt + install_package libvirt-python + install_package python-libguestfs + fi +} + +# Configures the installed libvirt system so that is accessible by +# STACK_USER via qemu:///system with management capabilities. +function configure_libvirt { + if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! 
sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then + # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces + cat </dev/null; then + sudo groupadd $LIBVIRT_GROUP + fi + add_user_to_group $STACK_USER $LIBVIRT_GROUP + + # Enable server side traces for libvirtd + if [[ "$DEBUG_LIBVIRT" = "True" ]] ; then + local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" + local log_outputs="1:file:/var/log/libvirt/libvirtd.log" + if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then + echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then + echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + fi + + # libvirt detects various settings on startup, as we potentially changed + # the system configuration (modules, filesystems), we need to restart + # libvirt to detect those changes. + restart_service $LIBVIRT_DAEMON +} + + +# Restore xtrace +$LV_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 5a51f33808..053df3cdf5 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -7,7 +7,6 @@ # Dependencies: # ``functions`` file # ``nova`` configuration -# ``STACK_USER`` has to be defined # install_nova_hypervisor - install any external requirements # configure_nova_hypervisor - make configuration changes, including those to other services @@ -19,14 +18,13 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace +source $TOP_DIR/lib/nova_plugins/functions-libvirt # Defaults # -------- # File injection is disabled by default in Nova. This will turn it back on. 
ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False} -# if we should turn on massive libvirt debugging -DEBUG_LIBVIRT=$(trueorfalse False $DEBUG_LIBVIRT) # Entry Points @@ -40,88 +38,7 @@ function cleanup_nova_hypervisor { # configure_nova_hypervisor - Set config files, create data dirs, etc function configure_nova_hypervisor { - if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then - # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces - cat </dev/null; then - sudo groupadd $LIBVIRT_GROUP - fi - add_user_to_group $STACK_USER $LIBVIRT_GROUP - - # Enable server side traces for libvirtd - if [[ "$DEBUG_LIBVIRT" = "True" ]] ; then - local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" - local log_outputs="1:file:/var/log/libvirt/libvirtd.log" - if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then - echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf - fi - if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then - echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf - fi - fi - - # libvirt detects various settings on startup, as we potentially changed - # the system configuration (modules, filesystems), we need to restart - # libvirt to detect those changes. 
- restart_service $LIBVIRT_DAEMON - + configure_libvirt iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" iniset $NOVA_CONF DEFAULT use_usb_tablet "False" @@ -150,17 +67,7 @@ EOF # install_nova_hypervisor() - Install external components function install_nova_hypervisor { - if is_ubuntu; then - install_package kvm - install_package libvirt-bin - install_package python-libvirt - install_package python-guestfs - elif is_fedora || is_suse; then - install_package kvm - install_package libvirt - install_package libvirt-python - install_package python-libguestfs - fi + install_libvirt # Install and configure **LXC** if specified. LXC is another approach to # splitting a system into many smaller parts. LXC uses cgroups and chroot From bbf759e9ed59b31258bcc8ba9fd3c79db9e57aee Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 13 Mar 2014 18:09:17 -0700 Subject: [PATCH 0595/4438] Only stop n-cpu in stop_nova_compute Move screen_stop n-cpu from stop_nova_rest to stop_nova_compute. Change-Id: I672673a55869d3f68e12c476924fc742e8260f39 --- lib/nova | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 360427d13e..b01d107525 100644 --- a/lib/nova +++ b/lib/nova @@ -716,6 +716,7 @@ function start_nova { } function stop_nova_compute { + screen_stop n-cpu if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then stop_nova_hypervisor fi @@ -725,7 +726,7 @@ function stop_nova_rest { # Kill the nova screen windows # Some services are listed here twice since more than one instance # of a service may be running in certain configs. 
- for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do + for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do screen_stop $serv done } From 1e94eb1a3f1c87670ff4720b89f25b95e0d15e07 Mon Sep 17 00:00:00 2001 From: Steve Martinelli Date: Thu, 13 Mar 2014 23:22:39 -0500 Subject: [PATCH 0596/4438] Move from keystoneclient to openstackclient in eucarc Updating an ec2 create command to openstackclient syntax. Change-Id: I3dd21ddd52b77f3af76988db9ae6b863427d9106 --- eucarc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eucarc b/eucarc index 350235106c..343f4ccde2 100644 --- a/eucarc +++ b/eucarc @@ -22,7 +22,7 @@ source $RC_DIR/openrc export EC2_URL=$(keystone catalog --service ec2 | awk '/ publicURL / { print $4 }') # Create EC2 credentials for the current user -CREDS=$(keystone ec2-credentials-create) +CREDS=$(openstack ec2 credentials create) export EC2_ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') export EC2_SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') From 2f6c30b33c074a03748b7c0273c49fe81ab96607 Mon Sep 17 00:00:00 2001 From: Steve Martinelli Date: Thu, 13 Mar 2014 23:32:46 -0500 Subject: [PATCH 0597/4438] Update client-env to use openstackclient commands Updated the only instance of a keystoneclient command, to check if the identity service is enabled. 
Change-Id: If86f71c1610a79690d6c6a8eb423b6fa234372bb --- exercises/client-env.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/client-env.sh b/exercises/client-env.sh index d955e4d1e1..4e8259cd06 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -64,7 +64,7 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then STATUS_KEYSTONE="Skipped" else echo -e "\nTest Keystone" - if keystone catalog --service identity; then + if openstack endpoint show identity; then STATUS_KEYSTONE="Succeeded" else STATUS_KEYSTONE="Failed" From 4376ae04df50fb9b338039b02a94fea351cedb28 Mon Sep 17 00:00:00 2001 From: Tiago Mello Date: Fri, 14 Mar 2014 10:48:56 -0300 Subject: [PATCH 0598/4438] Clean /etc/mysql when calling clean.sh The clean.sh script should also remove the /etc/mysql directory. It contains information from the old devstack installation and may conflict with the further one. apt-get purge does not remove it since the directory is not empty. Change-Id: I885345a2311851d8746abe42e44300ecd4f6e08a --- lib/databases/mysql | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/databases/mysql b/lib/databases/mysql index f5ee3c0ed0..7a0145ae1b 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -23,6 +23,7 @@ function cleanup_database_mysql { stop_service $MYSQL apt_get purge -y mysql* sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql return elif is_fedora; then if [[ $DISTRO =~ (rhel7) ]]; then From 0b03e7acb84e14efed3bfc2b30055a8427a40a12 Mon Sep 17 00:00:00 2001 From: Rafael Folco Date: Fri, 14 Mar 2014 11:14:57 -0300 Subject: [PATCH 0599/4438] Set correct default disk bus back to virtio on ppc64 virtio is supported and should be the default disk bus on Power to take advantage of I/O performance drivers. This aligns with Nova default bus values on PowerKVM. SCSI is the default for cdrom. 
Change-Id: I5de08c90359b3a500c352c09c07b6b082ddb4325 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 1d30922916..e439ef6dbe 100644 --- a/functions +++ b/functions @@ -290,7 +290,7 @@ function upload_image { esac if is_arch "ppc64"; then - IMG_PROPERTY="--property hw_disk_bus=scsi --property hw_cdrom_bus=scsi" + IMG_PROPERTY="--property hw_cdrom_bus=scsi" fi if [ "$CONTAINER_FORMAT" = "bare" ]; then From 846609b627bff979ce767dd9ad00daa46a150342 Mon Sep 17 00:00:00 2001 From: Piyush Masrani Date: Fri, 14 Mar 2014 19:21:48 +0530 Subject: [PATCH 0600/4438] Devstack changes to ceilometer to support vsphere Ceilometer currently supports only libvirt when installed using devstack. Have extended this support to Vmware Vsphere in this changelist. Change-Id: I98c64204973bca5e6a7f859a5431adb2b661277f --- lib/ceilometer | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/ceilometer b/lib/ceilometer index b0899e2f24..abf4629b5e 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -161,6 +161,13 @@ function configure_ceilometer { configure_mongodb cleanup_ceilometer fi + + if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then + iniset $CEILOMETER_CONF DEFAULT hypervisor_inspector vsphere + iniset $CEILOMETER_CONF vmware host_ip "$VMWAREAPI_IP" + iniset $CEILOMETER_CONF vmware host_username "$VMWAREAPI_USER" + iniset $CEILOMETER_CONF vmware host_password "$VMWAREAPI_PASSWORD" + fi } function configure_mongodb { @@ -204,6 +211,9 @@ function start_ceilometer { if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" fi + if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then + screen_it ceilometer-acompute "cd ; ceilometer-agent-compute --config-file $CEILOMETER_CONF" + fi screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF" screen_it ceilometer-anotification "cd ; ceilometer-agent-notification 
--config-file $CEILOMETER_CONF" screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF" From 380587bde6444edcc8c0b3adad250de70b27ad33 Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Fri, 14 Mar 2014 12:22:18 -0400 Subject: [PATCH 0601/4438] Rollback workaround for Marconi This patch rollsback the stderr redirection in Marconi. Change-Id: Iaa2d897295cf2bc2e4a8c370d3e0592def337c78 --- lib/marconi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index 3c4547fc75..fd1c35132a 100644 --- a/lib/marconi +++ b/lib/marconi @@ -154,7 +154,7 @@ function install_marconiclient { # start_marconi() - Start running processes, including screen function start_marconi { - screen_it marconi-server "marconi-server --config-file $MARCONI_CONF 2>&1" + screen_it marconi-server "marconi-server --config-file $MARCONI_CONF" echo "Waiting for Marconi to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then die $LINENO "Marconi did not start" From 29870cce3214766ecc208d0bb404724cf232ad69 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 14 Mar 2014 14:32:01 -0400 Subject: [PATCH 0602/4438] as is_heat_enabled this is missing, and the code assumes "heat" to be in the enabled services list otherwise. 
Change-Id: Ib0a7db04d8e38b58aca48261308e7c4d1fd43972 --- lib/heat | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/heat b/lib/heat index 2d9d863f0c..902333e29a 100644 --- a/lib/heat +++ b/lib/heat @@ -45,6 +45,13 @@ TEMPEST_SERVICES+=,heat # Functions # --------- +# Test if any Heat services are enabled +# is_heat_enabled +function is_heat_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"h-" ]] && return 0 + return 1 +} + # cleanup_heat() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_heat { From 06fb29c66124b6c753fdd262eb262043b4551298 Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Fri, 31 Jan 2014 18:02:07 +0400 Subject: [PATCH 0603/4438] Integration testing preparation for Ironic Add ability to create/register qemu vms for Ironic testing purposes Implements bp:deprecate-baremetal-driver Change-Id: If452438fcc0ff562531b33a36cd189b235654b48 --- extras.d/50-ironic.sh | 7 + files/apts/ironic | 10 + files/rpms/ironic | 9 + lib/baremetal | 7 +- lib/ironic | 265 ++++++++++++++++++- lib/nova_plugins/hypervisor-ironic | 75 ++++++ stackrc | 2 +- tools/install_prereqs.sh | 8 +- tools/ironic/scripts/cleanup-nodes | 25 ++ tools/ironic/scripts/configure-vm | 78 ++++++ tools/ironic/scripts/create-nodes | 68 +++++ tools/ironic/scripts/setup-network | 24 ++ tools/ironic/templates/brbm.xml | 6 + tools/ironic/templates/tftpd-xinetd.template | 11 + tools/ironic/templates/vm.xml | 43 +++ 15 files changed, 630 insertions(+), 8 deletions(-) create mode 100644 files/apts/ironic create mode 100644 files/rpms/ironic create mode 100644 lib/nova_plugins/hypervisor-ironic create mode 100755 tools/ironic/scripts/cleanup-nodes create mode 100755 tools/ironic/scripts/configure-vm create mode 100755 tools/ironic/scripts/create-nodes create mode 100755 tools/ironic/scripts/setup-network create mode 100644 tools/ironic/templates/brbm.xml create mode 100644 tools/ironic/templates/tftpd-xinetd.template 
create mode 100644 tools/ironic/templates/vm.xml diff --git a/extras.d/50-ironic.sh b/extras.d/50-ironic.sh index 9e61dc5d78..3b8e3d5045 100644 --- a/extras.d/50-ironic.sh +++ b/extras.d/50-ironic.sh @@ -24,10 +24,17 @@ if is_service_enabled ir-api ir-cond; then # Start the ironic API and ironic taskmgr components echo_summary "Starting Ironic" start_ironic + + if [[ "$IRONIC_BAREMETAL_BASIC_OPS" = "True" ]]; then + prepare_baremetal_basic_ops + fi fi if [[ "$1" == "unstack" ]]; then stop_ironic + if [[ "$IRONIC_BAREMETAL_BASIC_OPS" = "True" ]]; then + cleanup_baremetal_basic_ops + fi fi if [[ "$1" == "clean" ]]; then diff --git a/files/apts/ironic b/files/apts/ironic new file mode 100644 index 0000000000..a749ad762e --- /dev/null +++ b/files/apts/ironic @@ -0,0 +1,10 @@ +libguestfs0 +libvirt-bin +openssh-client +openvswitch-switch +openvswitch-datapath-dkms +python-libguestfs +python-libvirt +syslinux +tftpd-hpa +xinetd diff --git a/files/rpms/ironic b/files/rpms/ironic new file mode 100644 index 0000000000..54b98299ee --- /dev/null +++ b/files/rpms/ironic @@ -0,0 +1,9 @@ +libguestfs +libvirt +libvirt-python +openssh-clients +openvswitch +python-libguestfs +syslinux +tftp-server +xinetd diff --git a/lib/baremetal b/lib/baremetal index 1d02e1e417..eda92f97cb 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -140,7 +140,10 @@ BM_DEPLOY_KERNEL=${BM_DEPLOY_KERNEL:-} # If you need to add any extra flavors to the deploy ramdisk image # eg, specific network drivers, specify them here -BM_DEPLOY_FLAVOR=${BM_DEPLOY_FLAVOR:-} +# +# NOTE(deva): this will be moved to lib/ironic in a future patch +# for now, set the default to a suitable value for Ironic's needs +BM_DEPLOY_FLAVOR=${BM_DEPLOY_FLAVOR:--a amd64 ubuntu deploy-ironic} # set URL and version for google shell-in-a-box BM_SHELL_IN_A_BOX=${BM_SHELL_IN_A_BOX:-http://shellinabox.googlecode.com/files/shellinabox-2.14.tar.gz} @@ -220,7 +223,7 @@ function upload_baremetal_deploy { BM_DEPLOY_KERNEL=bm-deploy.kernel 
BM_DEPLOY_RAMDISK=bm-deploy.initramfs if [ ! -e "$TOP_DIR/files/$BM_DEPLOY_KERNEL" -o ! -e "$TOP_DIR/files/$BM_DEPLOY_RAMDISK" ]; then - $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR deploy \ + $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR \ -o $TOP_DIR/files/bm-deploy fi fi diff --git a/lib/ironic b/lib/ironic index b346de1e69..c6fa563e6a 100644 --- a/lib/ironic +++ b/lib/ironic @@ -18,16 +18,19 @@ # - stop_ironic # - cleanup_ironic -# Save trace setting +# Save trace and pipefail settings XTRACE=$(set +o | grep xtrace) +PIPEFAIL=$(set +o | grep pipefail) set +o xtrace - +set +o pipefail # Defaults # -------- # Set up default directories IRONIC_DIR=$DEST/ironic +IRONIC_DATA_DIR=$DATA_DIR/ironic +IRONIC_STATE_PATH=/var/lib/ironic IRONICCLIENT_DIR=$DEST/python-ironicclient IRONIC_AUTH_CACHE_DIR=${IRONIC_AUTH_CACHE_DIR:-/var/cache/ironic} IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic} @@ -35,6 +38,28 @@ IRONIC_CONF_FILE=$IRONIC_CONF_DIR/ironic.conf IRONIC_ROOTWRAP_CONF=$IRONIC_CONF_DIR/rootwrap.conf IRONIC_POLICY_JSON=$IRONIC_CONF_DIR/policy.json +# Set up defaults for functional / integration testing +IRONIC_SCRIPTS_DIR=${IRONIC_SCRIPTS_DIR:-$TOP_DIR/tools/ironic/scripts} +IRONIC_TEMPLATES_DIR=${IRONIC_TEMPLATES_DIR:-$TOP_DIR/tools/ironic/templates} +IRONIC_BAREMETAL_BASIC_OPS=$(trueorfalse False $IRONIC_BAREMETAL_BASIC_OPS) +IRONIC_SSH_USERNAME=${IRONIC_SSH_USERNAME:-`whoami`} +IRONIC_SSH_KEY_DIR=${IRONIC_SSH_KEY_DIR:-$IRONIC_DATA_DIR/ssh_keys} +IRONIC_SSH_KEY_FILENAME=${IRONIC_SSH_KEY_FILENAME:-ironic_key} +IRONIC_KEY_FILE=$IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME +IRONIC_SSH_VIRT_TYPE=${IRONIC_SSH_VIRT_TYPE:-virsh} +IRONIC_TFTPBOOT_DIR=${IRONIC_TFTPBOOT_DIR:-$IRONIC_DATA_DIR/tftpboot} +IRONIC_VM_SSH_PORT=${IRONIC_VM_SSH_PORT:-2222} +IRONIC_VM_SSH_ADDRESS=${IRONIC_VM_SSH_ADDRESS:-$HOST_IP} +IRONIC_VM_COUNT=${IRONIC_VM_COUNT:-1} +IRONIC_VM_SPECS_CPU=${IRONIC_VM_SPECS_CPU:-1} +IRONIC_VM_SPECS_RAM=${IRONIC_VM_SPECS_RAM:-256} 
+IRONIC_VM_SPECS_DISK=${IRONIC_VM_SPECS_DISK:-10} +IRONIC_VM_EMULATOR=${IRONIC_VM_EMULATOR:-/usr/bin/qemu-system-x86_64} +IRONIC_VM_NETWORK_BRIDGE=${IRONIC_VM_NETWORK_BRIDGE:-brbm} +IRONIC_VM_NETWORK_RANGE=${IRONIC_VM_NETWORK_RANGE:-192.0.2.0/24} +IRONIC_VM_MACS_CSV_FILE=${IRONIC_VM_MACS_CSV_FILE:-$IRONIC_DATA_DIR/ironic_macs.csv} +IRONIC_AUTHORIZED_KEYS_FILE=${IRONIC_AUTHORIZED_KEYS_FILE:-$HOME/.ssh/authorized_keys} + # Support entry points installation of console scripts IRONIC_BIN_DIR=$(get_python_exec_prefix) @@ -86,8 +111,8 @@ function configure_ironic { iniset $IRONIC_CONF_FILE DEFAULT debug True inicomment $IRONIC_CONF_FILE DEFAULT log_file iniset $IRONIC_CONF_FILE DEFAULT sql_connection `database_connection_url ironic` + iniset $IRONIC_CONF_FILE DEFAULT state_path $IRONIC_STATE_PATH iniset $IRONIC_CONF_FILE DEFAULT use_syslog $SYSLOG - # Configure Ironic conductor, if it was enabled. if is_service_enabled ir-cond; then configure_ironic_conductor @@ -97,6 +122,10 @@ function configure_ironic { if is_service_enabled ir-api; then configure_ironic_api fi + + if [[ "$IRONIC_BAREMETAL_BASIC_OPS" == "True" ]]; then + configure_ironic_auxiliary + fi } # configure_ironic_api() - Is used by configure_ironic(). 
Performs @@ -125,6 +154,10 @@ function configure_ironic_conductor { cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR iniset $IRONIC_CONF_FILE DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF + iniset $IRONIC_CONF_FILE conductor api_url http://$SERVICE_HOST:6385 + iniset $IRONIC_CONF_FILE pxe tftp_server $SERVICE_HOST + iniset $IRONIC_CONF_FILE pxe tftp_root $IRONIC_TFTPBOOT_DIR + iniset $IRONIC_CONF_FILE pxe tftp_master_path $IRONIC_TFTPBOOT_DIR/master_images } # create_ironic_cache_dir() - Part of the init_ironic() process @@ -225,9 +258,233 @@ function stop_ironic { screen -S $SCREEN_NAME -p ir-cond -X kill } +function is_ironic { + if ( is_service_enabled ir-cond && is_service_enabled ir-api ); then + return 0 + fi + return 1 +} + +function configure_ironic_dirs { + sudo mkdir -p $IRONIC_DATA_DIR + sudo mkdir -p $IRONIC_STATE_PATH + sudo mkdir -p $IRONIC_TFTPBOOT_DIR + sudo chown -R $STACK_USER $IRONIC_DATA_DIR $IRONIC_STATE_PATH + sudo chown -R $STACK_USER:$LIBVIRT_GROUP $IRONIC_TFTPBOOT_DIR + if is_ubuntu; then + PXEBIN=/usr/lib/syslinux/pxelinux.0 + elif is_fedora; then + PXEBIN=/usr/share/syslinux/pxelinux.0 + fi + if [ ! -f $PXEBIN ]; then + die $LINENO "pxelinux.0 (from SYSLINUX) not found." 
+ fi + + cp $PXEBIN $IRONIC_TFTPBOOT_DIR + mkdir -p $IRONIC_TFTPBOOT_DIR/pxelinux.cfg +} + +function ironic_ensure_libvirt_group { + groups $STACK_USER | grep -q $LIBVIRT_GROUP || adduser $STACK_USER $LIBVIRT_GROUP +} + +function create_bridge_and_vms { + ironic_ensure_libvirt_group + + # Call libvirt setup scripts in a new shell to ensure any new group membership + sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/setup-network" + + sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/create-nodes \ + $IRONIC_VM_SPECS_CPU $IRONIC_VM_SPECS_RAM $IRONIC_VM_SPECS_DISK \ + amd64 $IRONIC_VM_COUNT $IRONIC_VM_NETWORK_BRIDGE $IRONIC_VM_EMULATOR" >> $IRONIC_VM_MACS_CSV_FILE + +} + +function enroll_vms { + + CHASSIS_ID=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2) + IRONIC_NET_ID=$(neutron net-list | grep private | get_field 1) + local idx=0 + + # work around; need to know what netns neutron uses for private network + neutron port-create private + + while read MAC; do + + NODE_ID=$(ironic node-create --chassis_uuid $CHASSIS_ID --driver pxe_ssh \ + -i ssh_virt_type=$IRONIC_SSH_VIRT_TYPE \ + -i ssh_address=$IRONIC_VM_SSH_ADDRESS \ + -i ssh_port=$IRONIC_VM_SSH_PORT \ + -i ssh_username=$IRONIC_SSH_USERNAME \ + -i ssh_key_filename=$IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME \ + -p cpus=$IRONIC_VM_SPECS_CPU \ + -p memory_mb=$IRONIC_VM_SPECS_RAM \ + -p local_gb=$IRONIC_VM_SPECS_DISK \ + -p cpu_arch=x86_64 \ + | grep " uuid " | get_field 2) + + ironic port-create --address $MAC --node_uuid $NODE_ID + + idx=$((idx+1)) + + done < $IRONIC_VM_MACS_CSV_FILE + + # create the nova flavor + nova flavor-create baremetal auto $IRONIC_VM_SPECS_RAM $IRONIC_VM_SPECS_DISK $IRONIC_VM_SPECS_CPU + nova flavor-key baremetal set "cpu_arch"="x86_64" "baremetal:deploy_kernel_id"="$BM_DEPLOY_KERNEL_ID" "baremetal:deploy_ramdisk_id"="$BM_DEPLOY_RAMDISK_ID" + + # intentional sleep to make sure the tag has been set to port + sleep 10 + TAPDEV=$(sudo ip netns exec qdhcp-${IRONIC_NET_ID} 
ip link list | grep tap | cut -d':' -f2 | cut -b2-) + TAG_ID=$(sudo ovs-vsctl show |grep ${TAPDEV} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-) + + # make sure veth pair is not existing, otherwise delete its links + sudo ip link show ovs-tap1 && sudo ip link delete ovs-tap1 + sudo ip link show brbm-tap1 && sudo ip link delete brbm-tap1 + # create veth pair for future interconnection between br-int and brbm + sudo ip link add brbm-tap1 type veth peer name ovs-tap1 + sudo ip link set dev brbm-tap1 up + sudo ip link set dev ovs-tap1 up + + sudo ovs-vsctl -- --if-exists del-port ovs-tap1 -- add-port br-int ovs-tap1 tag=$TAG_ID + sudo ovs-vsctl -- --if-exists del-port brbm-tap1 -- add-port $IRONIC_VM_NETWORK_BRIDGE brbm-tap1 +} + +function configure_tftpd { + # enable tftp natting for allowing connections to SERVICE_HOST's tftp server + sudo modprobe nf_conntrack_tftp + sudo modprobe nf_nat_tftp + + if is_ubuntu; then + PXEBIN=/usr/lib/syslinux/pxelinux.0 + elif is_fedora; then + PXEBIN=/usr/share/syslinux/pxelinux.0 + fi + if [ ! -f $PXEBIN ]; then + die $LINENO "pxelinux.0 (from SYSLINUX) not found." + fi + + # stop tftpd and setup serving via xinetd + stop_service tftpd-hpa || true + [ -f /etc/init/tftpd-hpa.conf ] && echo "manual" | sudo tee /etc/init/tftpd-hpa.override + sudo cp $IRONIC_TEMPLATES_DIR/tftpd-xinetd.template /etc/xinetd.d/tftp + sudo sed -e "s|%TFTPBOOT_DIR%|$IRONIC_TFTPBOOT_DIR|g" -i /etc/xinetd.d/tftp + + # setup tftp file mapping to satisfy requests at the root (booting) and + # /tftpboot/ sub-dir (as per deploy-ironic elements) + echo "r ^([^/]) $IRONIC_TFTPBOOT_DIR/\1" >$IRONIC_TFTPBOOT_DIR/map-file + echo "r ^(/tftpboot/) $IRONIC_TFTPBOOT_DIR/\2" >>$IRONIC_TFTPBOOT_DIR/map-file + + chmod -R 0755 $IRONIC_TFTPBOOT_DIR + restart_service xinetd +} + +function configure_ironic_ssh_keypair { + # Generating ssh key pair for stack user + if [[ ! -d $IRONIC_SSH_KEY_DIR ]]; then + mkdir -p $IRONIC_SSH_KEY_DIR + fi + if [[ ! 
-d $HOME/.ssh ]]; then + mkdir -p $HOME/.ssh + chmod 700 $HOME/.ssh + fi + echo -e 'n\n' | ssh-keygen -q -t rsa -P '' -f $IRONIC_KEY_FILE + cat $IRONIC_KEY_FILE.pub | tee -a $IRONIC_AUTHORIZED_KEYS_FILE +} + +function ironic_ssh_check { + local KEY_FILE=$1 + local FLOATING_IP=$2 + local PORT=$3 + local DEFAULT_INSTANCE_USER=$4 + local ACTIVE_TIMEOUT=$5 + if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -p $PORT -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success; do sleep 1; done"; then + die $LINENO "server didn't become ssh-able!" + fi +} + +function configure_ironic_sshd { + # Ensure sshd server accepts connections from localhost only + + SSH_CONFIG=/etc/ssh/sshd_config + HOST_PORT=$IRONIC_VM_SSH_ADDRESS:$IRONIC_VM_SSH_PORT + if ! sudo grep ListenAddress $SSH_CONFIG | grep $HOST_PORT; then + echo "ListenAddress $HOST_PORT" | sudo tee -a $SSH_CONFIG + fi + + SSH_SERVICE_NAME=sshd + if is_ubuntu; then + SSH_SERVICE_NAME=ssh + fi + + restart_service $SSH_SERVICE_NAME + # to ensure ssh service is up and running + sleep 3 + ironic_ssh_check $IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME $IRONIC_VM_SSH_ADDRESS $IRONIC_VM_SSH_PORT $IRONIC_SSH_USERNAME 10 + +} + +function configure_ironic_auxiliary { + configure_ironic_dirs + configure_ironic_ssh_keypair + configure_ironic_sshd +} + +function prepare_baremetal_basic_ops { + + # install diskimage-builder + git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH + + # make sure all needed service were enabled + for srv in nova glance key neutron; do + if ! 
is_service_enabled "$srv"; then + die $LINENO "$srv should be enabled for ironic tests" + fi + done + + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + + # stop all nova services + stop_nova || true + + # remove any nova services failure status + find $SERVICE_DIR/$SCREEN_NAME -name 'n-*.failure' -exec rm -f '{}' \; + + # start them again + start_nova_api + start_nova + + TOKEN=$(keystone token-get | grep ' id ' | get_field 2) + die_if_not_set $LINENO TOKEN "Keystone fail to get token" + + echo_summary "Creating and uploading baremetal images for ironic" + + # build and upload separate deploy kernel & ramdisk + upload_baremetal_deploy $TOKEN + + create_bridge_and_vms + enroll_vms + configure_tftpd +} + +function cleanup_baremetal_basic_ops { + rm -f $IRONIC_VM_MACS_CSV_FILE + if [ -f $IRONIC_KEY_FILE ]; then + KEY=`cat $IRONIC_KEY_FILE.pub` + # remove public key from authorized_keys + grep -v "$KEY" $IRONIC_AUTHORIZED_KEYS_FILE > temp && mv temp $IRONIC_AUTHORIZED_KEYS_FILE + chmod 0600 $IRONIC_AUTHORIZED_KEYS_FILE + fi + sudo rm -rf $IRONIC_DATA_DIR $IRONIC_STATE_PATH + sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/cleanup-nodes $IRONIC_VM_COUNT $IRONIC_VM_NETWORK_BRIDGE" + sudo rm -rf /etc/xinetd.d/tftp /etc/init/tftpd-hpa.override + restart_service xinetd +} -# Restore xtrace +# Restore xtrace + pipefail $XTRACE +$PIPEFAIL # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic new file mode 100644 index 0000000000..5af7c0b292 --- /dev/null +++ b/lib/nova_plugins/hypervisor-ironic @@ -0,0 +1,75 @@ +# lib/nova_plugins/hypervisor-ironic +# Configure the ironic hypervisor + +# Enable with: +# VIRT_DRIVER=ironic + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# 
start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor { + # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor { + iniset $NOVA_CONF ironic sql_connection `database_connection_url nova_bm` + LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"} + iniset $NOVA_CONF DEFAULT compute_driver ironic.nova.virt.ironic.IronicDriver + iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER + iniset $NOVA_CONF DEFAULT scheduler_host_manager ironic.nova.scheduler.ironic_host_manager.IronicHostManager + iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0 + iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0 + # ironic section + iniset $NOVA_CONF ironic admin_username admin + iniset $NOVA_CONF ironic admin_password $ADMIN_PASSWORD + iniset $NOVA_CONF ironic admin_url $KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 + iniset $NOVA_CONF ironic admin_tenant_name demo + iniset $NOVA_CONF ironic api_endpoint http://$SERVICE_HOST:6358/v1 +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor { + # This function intentionally left blank + : +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor { + # This function intentionally left blank + : +} + + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stackrc b/stackrc 
index 456637854b..4a997bf77c 100644 --- a/stackrc +++ b/stackrc @@ -267,7 +267,7 @@ DEFAULT_VIRT_DRIVER=libvirt is_package_installed xenserver-core && DEFAULT_VIRT_DRIVER=xenserver VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER} case "$VIRT_DRIVER" in - libvirt) + ironic|libvirt) LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} if [[ "$os_VENDOR" =~ (Debian) ]]; then LIBVIRT_GROUP=libvirt diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index 0c65fd9b00..9651083cb3 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -55,7 +55,13 @@ export_proxy_variables # ================ # Install package requirements -install_package $(get_packages general $ENABLED_SERVICES) +PACKAGES=$(get_packages general $ENABLED_SERVICES) +if is_ubuntu && echo $PACKAGES | grep -q dkms ; then + # ensure headers for the running kernel are installed for any DKMS builds + PACKAGES="$PACKAGES linux-headers-$(uname -r)" +fi + +install_package $PACKAGES if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then if is_ubuntu || is_fedora; then diff --git a/tools/ironic/scripts/cleanup-nodes b/tools/ironic/scripts/cleanup-nodes new file mode 100755 index 0000000000..dc5a19d1cd --- /dev/null +++ b/tools/ironic/scripts/cleanup-nodes @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +# **cleanup-nodes** + +# Cleans up baremetal poseur nodes and volumes created during ironic setup +# Assumes calling user has proper libvirt group membership and access. 
+ +set -exu + +LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"} + +VM_COUNT=$1 +NETWORK_BRIDGE=$2 + +for (( idx=0; idx<$VM_COUNT; idx++ )); do + NAME="baremetal${NETWORK_BRIDGE}_${idx}" + VOL_NAME="baremetal${NETWORK_BRIDGE}-${idx}.qcow2" + virsh list | grep -q $NAME && virsh destroy $NAME + virsh list --inactive | grep -q $NAME && virsh undefine $NAME + + if virsh pool-list | grep -q $LIBVIRT_STORAGE_POOL ; then + virsh vol-list $LIBVIRT_STORAGE_POOL | grep -q $VOL_NAME && + virsh vol-delete $VOL_NAME --pool $LIBVIRT_STORAGE_POOL + fi +done diff --git a/tools/ironic/scripts/configure-vm b/tools/ironic/scripts/configure-vm new file mode 100755 index 0000000000..9936b76c4f --- /dev/null +++ b/tools/ironic/scripts/configure-vm @@ -0,0 +1,78 @@ +#!/usr/bin/env python + +import argparse +import os.path + +import libvirt + +templatedir = os.path.join(os.path.dirname(os.path.dirname(__file__)), + 'templates') + + +def main(): + parser = argparse.ArgumentParser( + description="Configure a kvm virtual machine for the seed image.") + parser.add_argument('--name', default='seed', + help='the name to give the machine in libvirt.') + parser.add_argument('--image', + help='Use a custom image file (must be qcow2).') + parser.add_argument('--engine', default='qemu', + help='The virtualization engine to use') + parser.add_argument('--arch', default='i686', + help='The architecture to use') + parser.add_argument('--memory', default='2097152', + help="Maximum memory for the VM in KB.") + parser.add_argument('--cpus', default='1', + help="CPU count for the VM.") + parser.add_argument('--bootdev', default='hd', + help="What boot device to use (hd/network).") + parser.add_argument('--network', default="brbm", + help='The libvirt network name to use') + parser.add_argument('--libvirt-nic-driver', default='e1000', + help='The libvirt network driver to use') + parser.add_argument('--emulator', default=None, + help='Path to emulator bin for vm template') + args = parser.parse_args() 
+ with file(templatedir + '/vm.xml', 'rb') as f: + source_template = f.read() + params = { + 'name': args.name, + 'imagefile': args.image, + 'engine': args.engine, + 'arch': args.arch, + 'memory': args.memory, + 'cpus': args.cpus, + 'bootdev': args.bootdev, + 'network': args.network, + 'emulator': args.emulator, + } + + if args.emulator: + params['emulator'] = args.emulator + else: + if os.path.exists("/usr/bin/kvm"): # Debian + params['emulator'] = "/usr/bin/kvm" + elif os.path.exists("/usr/bin/qemu-kvm"): # Redhat + params['emulator'] = "/usr/bin/qemu-kvm" + + nicparams = { + 'nicdriver': args.libvirt_nic_driver, + 'network': args.network, + } + + params['bm_network'] = """ + + + + + +
+""" % nicparams + + libvirt_template = source_template % params + conn = libvirt.open("qemu:///system") + a = conn.defineXML(libvirt_template) + print ("Created machine %s with UUID %s" % (args.name, a.UUIDString())) + +if __name__ == '__main__': + main() diff --git a/tools/ironic/scripts/create-nodes b/tools/ironic/scripts/create-nodes new file mode 100755 index 0000000000..3232b50776 --- /dev/null +++ b/tools/ironic/scripts/create-nodes @@ -0,0 +1,68 @@ +#!/usr/bin/env bash + +# **create-nodes** + +# Creates baremetal poseur nodes for ironic testing purposes + +set -exu + +# Keep track of the devstack directory +TOP_DIR=$(cd $(dirname "$0")/.. && pwd) + +CPU=$1 +MEM=$(( 1024 * $2 )) +# extra G to allow fuzz for partition table : flavor size and registered size +# need to be different to actual size. +DISK=$(( $3 + 1)) + +case $4 in + i386) ARCH='i686' ;; + amd64) ARCH='x86_64' ;; + *) echo "Unsupported arch $4!" ; exit 1 ;; +esac + +TOTAL=$(($5 - 1)) +BRIDGE=$6 +EMULATOR=$7 + +LIBVIRT_NIC_DRIVER=${LIBVIRT_NIC_DRIVER:-"e1000"} +LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"} + +if ! virsh pool-list --all | grep -q $LIBVIRT_STORAGE_POOL; then + virsh pool-define-as --name $LIBVIRT_STORAGE_POOL dir --target /var/lib/libvirt/images >&2 + virsh pool-autostart $LIBVIRT_STORAGE_POOL >&2 + virsh pool-start $LIBVIRT_STORAGE_POOL >&2 +fi + +pool_state=$(virsh pool-info $LIBVIRT_STORAGE_POOL | grep State | awk '{ print $2 }') +if [ "$pool_state" != "running" ] ; then + [ ! 
-d /var/lib/libvirt/images ] && sudo mkdir /var/lib/libvirt/images + virsh pool-start $LIBVIRT_STORAGE_POOL >&2 +fi + +PREALLOC= +if [ -f /etc/debian_version ]; then + PREALLOC="--prealloc-metadata" +fi + +DOMS="" +for idx in $(seq 0 $TOTAL) ; do + NAME="baremetal${BRIDGE}_${idx}" + DOMS="$DOMS $NAME" + VOL_NAME="baremetal${BRIDGE}-${idx}.qcow2" + (virsh list --all | grep -q $NAME) && continue + + virsh vol-list --pool $LIBVIRT_STORAGE_POOL | grep -q $VOL_NAME && + virsh vol-delete $VOL_NAME --pool $LIBVIRT_STORAGE_POOL >&2 + virsh vol-create-as $LIBVIRT_STORAGE_POOL ${VOL_NAME} ${DISK}G --format qcow2 $PREALLOC >&2 + volume_path=$(virsh vol-path --pool $LIBVIRT_STORAGE_POOL $VOL_NAME) + # Pre-touch the VM to set +C, as it can only be set on empty files. + sudo touch "$volume_path" + sudo chattr +C "$volume_path" || true + $TOP_DIR/scripts/configure-vm --bootdev network --name $NAME --image "$volume_path" --arch $ARCH --cpus $CPU --memory $MEM --libvirt-nic-driver $LIBVIRT_NIC_DRIVER --emulator $EMULATOR --network $BRIDGE >&2 +done + +for dom in $DOMS ; do + # echo mac + virsh dumpxml $dom | grep "mac address" | head -1 | cut -d\' -f2 +done diff --git a/tools/ironic/scripts/setup-network b/tools/ironic/scripts/setup-network new file mode 100755 index 0000000000..8c3ea901b4 --- /dev/null +++ b/tools/ironic/scripts/setup-network @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +# **setup-network** + +# Setups openvswitch libvirt network suitable for +# running baremetal poseur nodes for ironic testing purposes + +set -exu + +# Keep track of the devstack directory +TOP_DIR=$(cd $(dirname "$0")/.. && pwd) +BRIDGE_SUFFIX=${1:-''} +BRIDGE_NAME=brbm$BRIDGE_SUFFIX + +# Only add bridge if missing +(sudo ovs-vsctl list-br | grep ${BRIDGE_NAME}$) || sudo ovs-vsctl add-br ${BRIDGE_NAME} + +# remove bridge before replacing it. 
+(virsh net-list | grep "${BRIDGE_NAME} ") && virsh net-destroy ${BRIDGE_NAME} +(virsh net-list --inactive | grep "${BRIDGE_NAME} ") && virsh net-undefine ${BRIDGE_NAME} + +virsh net-define <(sed s/brbm/$BRIDGE_NAME/ $TOP_DIR/templates/brbm.xml) +virsh net-autostart ${BRIDGE_NAME} +virsh net-start ${BRIDGE_NAME} diff --git a/tools/ironic/templates/brbm.xml b/tools/ironic/templates/brbm.xml new file mode 100644 index 0000000000..0769d3f1d0 --- /dev/null +++ b/tools/ironic/templates/brbm.xml @@ -0,0 +1,6 @@ + + brbm + + + + diff --git a/tools/ironic/templates/tftpd-xinetd.template b/tools/ironic/templates/tftpd-xinetd.template new file mode 100644 index 0000000000..7b9b0f8a78 --- /dev/null +++ b/tools/ironic/templates/tftpd-xinetd.template @@ -0,0 +1,11 @@ +service tftp +{ + protocol = udp + port = 69 + socket_type = dgram + wait = yes + user = root + server = /usr/sbin/in.tftpd + server_args = -v -v -v -v -v --map-file %TFTPBOOT_DIR%/map-file %TFTPBOOT_DIR% + disable = no +} diff --git a/tools/ironic/templates/vm.xml b/tools/ironic/templates/vm.xml new file mode 100644 index 0000000000..b18dec055f --- /dev/null +++ b/tools/ironic/templates/vm.xml @@ -0,0 +1,43 @@ + + %(name)s + %(memory)s + %(cpus)s + + hvm + + + + + + + + + + destroy + restart + restart + + %(emulator)s + + + + +
+ + +
+ + %(network)s + %(bm_network)s + + +