From ca1b85283b2d53e5e6e52a90a57a9310dd948d5c Mon Sep 17 00:00:00 2001 From: DennyZhang Date: Sun, 17 Nov 2013 15:44:32 -0600 Subject: [PATCH 0001/4119] Mute false alarm when installing docker In install_docker.sh, we will restart docker service, then connect to /var/run/docker.sock with retry mechanism. At the first contacting with /var/run/docker.sock, when docker service is not ready, it may complain some error. Mute this false alarm. Change-Id: If00a18d2e3ddee951662e272d47ae84215f16ad2 Closes-Bug: #1252087 --- tools/docker/install_docker.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh index 2e5b510c41..375cfe958b 100755 --- a/tools/docker/install_docker.sh +++ b/tools/docker/install_docker.sh @@ -45,7 +45,7 @@ restart_service docker echo "Waiting for docker daemon to start..." DOCKER_GROUP=$(groups | cut -d' ' -f1) -CONFIGURE_CMD="while ! /bin/echo -e 'GET /v1.3/version HTTP/1.0\n\n' | socat - unix-connect:$DOCKER_UNIX_SOCKET | grep -q '200 OK'; do +CONFIGURE_CMD="while ! /bin/echo -e 'GET /v1.3/version HTTP/1.0\n\n' | socat - unix-connect:$DOCKER_UNIX_SOCKET 2>/dev/null | grep -q '200 OK'; do # Set the right group on docker unix socket before retrying sudo chgrp $DOCKER_GROUP $DOCKER_UNIX_SOCKET sudo chmod g+rw $DOCKER_UNIX_SOCKET From 9e136b4adee6ce33fdbf01e0a8614c186c5f20b7 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 16 Dec 2013 15:52:03 +0900 Subject: [PATCH 0002/4119] Neutron: Define a utility function to add neutron service class When enabling neutron service (i.e. enable_service q-xxx), related code unconditionally adds a necessary plugin class to Q_SERVICE_PLUGIN_CLASSES. Which may cause duplication in Q_SERVICE_PLUGIN_CLASSES when Q_SERVICE_PLUGIN_CLASSES is explicitly specified in localrc. As a result, neutron server fails to start. This patch introduces a utility function to add service class, and check duplication. 
Closes-Bug: #1261291 Change-Id: Id2880c7647babfccc3e8d9fc60dd93c4b3997ed9 --- lib/neutron | 10 ++++++++++ lib/neutron_plugins/ml2 | 6 +----- lib/neutron_plugins/services/firewall | 6 +----- lib/neutron_plugins/services/loadbalancer | 6 +----- lib/neutron_plugins/services/metering | 6 +----- lib/neutron_plugins/services/vpn | 6 +----- 6 files changed, 15 insertions(+), 25 deletions(-) diff --git a/lib/neutron b/lib/neutron index b05b16d72e..38081653e4 100644 --- a/lib/neutron +++ b/lib/neutron @@ -744,6 +744,16 @@ function _configure_neutron_service() { # Utility Functions #------------------ +# _neutron_service_plugin_class_add() - add service plugin class +function _neutron_service_plugin_class_add() { + local service_plugin_class=$1 + if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then + Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class + elif [[ ! ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then + Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class" + fi +} + # _neutron_setup_rootwrap() - configure Neutron's rootwrap function _neutron_setup_rootwrap() { if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index b5b1873f3f..ab4e3474a6 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -54,11 +54,7 @@ function neutron_plugin_configure_common() { Q_PLUGIN_CLASS="neutron.plugins.ml2.plugin.Ml2Plugin" # The ML2 plugin delegates L3 routing/NAT functionality to # the L3 service plugin which must therefore be specified. 
- if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES=$ML2_L3_PLUGIN - else - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$ML2_L3_PLUGIN" - fi + _neutron_service_plugin_class_add $ML2_L3_PLUGIN } function neutron_plugin_configure_service() { diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall index 1597e8577d..97cc5a28fd 100644 --- a/lib/neutron_plugins/services/firewall +++ b/lib/neutron_plugins/services/firewall @@ -8,11 +8,7 @@ set +o xtrace FWAAS_PLUGIN=neutron.services.firewall.fwaas_plugin.FirewallPlugin function neutron_fwaas_configure_common() { - if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES=$FWAAS_PLUGIN - else - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$FWAAS_PLUGIN" - fi + _neutron_service_plugin_class_add $FWAAS_PLUGIN } function neutron_fwaas_configure_driver() { diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer index c38f904b69..6ff991c855 100644 --- a/lib/neutron_plugins/services/loadbalancer +++ b/lib/neutron_plugins/services/loadbalancer @@ -19,11 +19,7 @@ function neutron_agent_lbaas_install_agent_packages() { } function neutron_agent_lbaas_configure_common() { - if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES=$LBAAS_PLUGIN - else - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$LBAAS_PLUGIN" - fi + _neutron_service_plugin_class_add $LBAAS_PLUGIN } function neutron_agent_lbaas_configure_agent() { diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering index 629f3b788a..5cabfbfc3b 100644 --- a/lib/neutron_plugins/services/metering +++ b/lib/neutron_plugins/services/metering @@ -10,11 +10,7 @@ AGENT_METERING_BINARY="$NEUTRON_BIN_DIR/neutron-metering-agent" METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin" function neutron_agent_metering_configure_common() { - if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; 
then - Q_SERVICE_PLUGIN_CLASSES=$METERING_PLUGIN - else - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$METERING_PLUGIN" - fi + _neutron_service_plugin_class_add $METERING_PLUGIN } function neutron_agent_metering_configure_agent() { diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn index b8f5c7d56b..1ab07cb93c 100644 --- a/lib/neutron_plugins/services/vpn +++ b/lib/neutron_plugins/services/vpn @@ -15,11 +15,7 @@ function neutron_vpn_install_agent_packages() { } function neutron_vpn_configure_common() { - if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES=$VPN_PLUGIN - else - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$VPN_PLUGIN" - fi + _neutron_service_plugin_class_add $VPN_PLUGIN } # Restore xtrace From 16312738d1a8302537e76e1e6cdeac85d63b64aa Mon Sep 17 00:00:00 2001 From: Jianing Yang Date: Sun, 22 Dec 2013 10:47:39 +0800 Subject: [PATCH 0003/4119] Correct glance db_sync command Closes-Bug: #1263431 Change-Id: I30a53adfdd8e00a9995595af2e090190bac241a0 --- lib/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index b278796d21..135136db7e 100644 --- a/lib/glance +++ b/lib/glance @@ -176,7 +176,7 @@ function init_glance() { recreate_database glance utf8 # Migrate glance database - $GLANCE_BIN_DIR/glance-manage db sync + $GLANCE_BIN_DIR/glance-manage db_sync create_glance_cache_dir } From 60fcfb5c91063bb71252b7077a363092d8bebe2b Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Mon, 23 Dec 2013 17:23:47 +0000 Subject: [PATCH 0004/4119] XenAPI: Fix bug with Xen ext4-using guests Ubuntu saucy is using ext4, which means it hits a barrier bug with certain versions of Xen, leading to a read only filesystem. 
This is bug https://bugs.launchpad.net/ubuntu/+source/linux/+bug/824089 Change-Id: I9a72b203d473dc555324d44ad7c240c80dccda15 --- tools/xen/prepare_guest_template.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh index 6ea6f6321d..546ac99cd9 100755 --- a/tools/xen/prepare_guest_template.sh +++ b/tools/xen/prepare_guest_template.sh @@ -79,3 +79,7 @@ bash /opt/stack/prepare_guest.sh \\ "$GUEST_PASSWORD" "$XS_TOOLS_PATH" "$STACK_USER" \\ > /opt/stack/prepare_guest.log 2>&1 EOF + +# Need to set barrier=0 to avoid a Xen bug +# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/824089 +sed -i -e 's/errors=/barrier=0,errors=/' $STAGING_DIR/etc/fstab From e4b85590037974b04487be5b4e23166a8a35d9dc Mon Sep 17 00:00:00 2001 From: Ben Nemec Date: Fri, 20 Dec 2013 19:51:04 +0000 Subject: [PATCH 0005/4119] Set default_network in tempest.conf This is to support testing of change Ia78582cac3790653c2281a5b63d953cd46d5c290 in Tempest. 
Change-Id: Ibb812e2598fb11b7eef21a0868ee9baeea73186c --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 0969b2df1f..95b300ce77 100644 --- a/lib/tempest +++ b/lib/tempest @@ -283,6 +283,7 @@ function configure_tempest() { iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable" iniset $TEMPEST_CONF network public_network_id "$public_network_id" iniset $TEMPEST_CONF network public_router_id "$public_router_id" + iniset $TEMPEST_CONF network default_network "$FIXED_RANGE" # boto iniset $TEMPEST_CONF boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" From 3bd85c9d6e257fc952cb3c6d0c09e199685bd5ed Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Wed, 25 Dec 2013 22:14:11 +0900 Subject: [PATCH 0006/4119] Change the libvirtd log level to DEBUG Gate tests fail sometimes due to libvirt problems, but it is difficult to investigate their reasons or workarounds because there is not any log about libvirt. This patch changes the log level of libvirtd to DEBUG for investigating libvirt problems. Change-Id: Ib6559ff978fa813d0332f2339d241dd3437196ee Related-Bug: #1254872 --- lib/nova_plugins/hypervisor-libvirt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 6f90f4ac17..ef40e7ab4c 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -93,6 +93,9 @@ EOF" fi fi + # Change the libvirtd log level to DEBUG. + sudo sed -i s/"#log_level = 3"/"log_level = 1"/ /etc/libvirt/libvirtd.conf + # The user that nova runs as needs to be member of **libvirtd** group otherwise # nova-compute will be unable to use libvirt. if ! 
getent group $LIBVIRT_GROUP >/dev/null; then From 9aadec380605e4b2aab0fb159c4186618a284853 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Fri, 27 Dec 2013 19:08:26 +0900 Subject: [PATCH 0007/4119] Remove some keystone resource parsers Current "keystone" command can parse the specified resources(tenant, user, role, service) by itself. Then it is unnecessary to translate resource names to resource ids in devstack. This patch removes these resource parsers from devstack for cleanup. Change-Id: Ibae06581b471f02168b559b4ca0c10f14996d661 --- files/keystone_data.sh | 113 +++++++++++++++++++---------------------- 1 file changed, 51 insertions(+), 62 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index ea2d52d114..07b6b601d2 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -28,16 +28,6 @@ export SERVICE_TOKEN=$SERVICE_TOKEN export SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} -function get_id () { - echo `"$@" | awk '/ id / { print $4 }'` -} - -# Lookups -SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") -ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") -MEMBER_ROLE=$(keystone role-list | awk "/ Member / { print \$2 }") - - # Roles # ----- @@ -45,53 +35,52 @@ MEMBER_ROLE=$(keystone role-list | awk "/ Member / { print \$2 }") # The admin role in swift allows a user to act as an admin for their tenant, # but ResellerAdmin is needed for a user to act as any tenant. 
The name of this # role is also configurable in swift-proxy.conf -RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) +keystone role-create --name=ResellerAdmin # Service role, so service users do not have to be admins -SERVICE_ROLE=$(get_id keystone role-create --name=service) +keystone role-create --name=service # Services # -------- if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then - NOVA_USER=$(keystone user-list | awk "/ nova / { print \$2 }") # Nova needs ResellerAdmin role to download images when accessing # swift through the s3 api. keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $NOVA_USER \ - --role-id $RESELLER_ROLE + --tenant $SERVICE_TENANT_NAME \ + --user nova \ + --role ResellerAdmin fi # Heat if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then - HEAT_USER=$(get_id keystone user-create --name=heat \ + keystone user-create --name=heat \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=heat@example.com) - keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $HEAT_USER \ - --role-id $SERVICE_ROLE + --tenant $SERVICE_TENANT_NAME \ + --email=heat@example.com + keystone user-role-add --tenant $SERVICE_TENANT_NAME \ + --user heat \ + --role service # heat_stack_user role is for users created by Heat keystone role-create --name heat_stack_user if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - HEAT_CFN_SERVICE=$(get_id keystone service-create \ + keystone service-create \ --name=heat-cfn \ --type=cloudformation \ - --description="Heat CloudFormation Service") + --description="Heat CloudFormation Service" keystone endpoint-create \ --region RegionOne \ - --service_id $HEAT_CFN_SERVICE \ + --service heat-cfn \ --publicurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \ --adminurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \ --internalurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" - HEAT_SERVICE=$(get_id keystone 
service-create \ + keystone service-create \ --name=heat \ --type=orchestration \ - --description="Heat Service") + --description="Heat Service" keystone endpoint-create \ --region RegionOne \ - --service_id $HEAT_SERVICE \ + --service heat \ --publicurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ --adminurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ --internalurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" @@ -100,23 +89,23 @@ fi # Glance if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then - GLANCE_USER=$(get_id keystone user-create \ + keystone user-create \ --name=glance \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=glance@example.com) + --tenant $SERVICE_TENANT_NAME \ + --email=glance@example.com keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $GLANCE_USER \ - --role-id $ADMIN_ROLE + --tenant $SERVICE_TENANT_NAME \ + --user glance \ + --role admin if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - GLANCE_SERVICE=$(get_id keystone service-create \ + keystone service-create \ --name=glance \ --type=image \ - --description="Glance Image Service") + --description="Glance Image Service" keystone endpoint-create \ --region RegionOne \ - --service_id $GLANCE_SERVICE \ + --service glance \ --publicurl "http://$SERVICE_HOST:9292" \ --adminurl "http://$SERVICE_HOST:9292" \ --internalurl "http://$SERVICE_HOST:9292" @@ -125,25 +114,25 @@ fi # Ceilometer if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then - CEILOMETER_USER=$(get_id keystone user-create --name=ceilometer \ + keystone user-create --name=ceilometer \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=ceilometer@example.com) - keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $CEILOMETER_USER \ - --role-id $ADMIN_ROLE + --tenant $SERVICE_TENANT_NAME \ + --email=ceilometer@example.com + keystone user-role-add --tenant $SERVICE_TENANT_NAME \ + --user ceilometer \ + --role admin # Ceilometer needs 
ResellerAdmin role to access swift account stats. - keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $CEILOMETER_USER \ - --role-id $RESELLER_ROLE + keystone user-role-add --tenant $SERVICE_TENANT_NAME \ + --user ceilometer \ + --role ResellerAdmin if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - CEILOMETER_SERVICE=$(get_id keystone service-create \ + keystone service-create \ --name=ceilometer \ --type=metering \ - --description="Ceilometer Service") + --description="Ceilometer Service" keystone endpoint-create \ --region RegionOne \ - --service_id $CEILOMETER_SERVICE \ + --service ceilometer \ --publicurl "http://$SERVICE_HOST:8777" \ --adminurl "http://$SERVICE_HOST:8777" \ --internalurl "http://$SERVICE_HOST:8777" @@ -153,13 +142,13 @@ fi # EC2 if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - EC2_SERVICE=$(get_id keystone service-create \ + keystone service-create \ --name=ec2 \ --type=ec2 \ - --description="EC2 Compatibility Layer") + --description="EC2 Compatibility Layer" keystone endpoint-create \ --region RegionOne \ - --service_id $EC2_SERVICE \ + --service ec2 \ --publicurl "http://$SERVICE_HOST:8773/services/Cloud" \ --adminurl "http://$SERVICE_HOST:8773/services/Admin" \ --internalurl "http://$SERVICE_HOST:8773/services/Cloud" @@ -169,13 +158,13 @@ fi # S3 if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift3" ]]; then if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - S3_SERVICE=$(get_id keystone service-create \ + keystone service-create \ --name=s3 \ --type=s3 \ - --description="S3") + --description="S3" keystone endpoint-create \ --region RegionOne \ - --service_id $S3_SERVICE \ + --service s3 \ --publicurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ --adminurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ --internalurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" @@ -185,14 +174,14 @@ fi if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then # Tempest has some tests that 
validate various authorization checks # between two regular users in separate tenants - ALT_DEMO_TENANT=$(get_id keystone tenant-create \ - --name=alt_demo) - ALT_DEMO_USER=$(get_id keystone user-create \ + keystone tenant-create \ + --name=alt_demo + keystone user-create \ --name=alt_demo \ --pass="$ADMIN_PASSWORD" \ - --email=alt_demo@example.com) + --email=alt_demo@example.com keystone user-role-add \ - --tenant-id $ALT_DEMO_TENANT \ - --user-id $ALT_DEMO_USER \ - --role-id $MEMBER_ROLE + --tenant alt_demo \ + --user alt_demo \ + --role Member fi From 0f9a1b058423b293935b414b2035713d8ead3e71 Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Sat, 28 Dec 2013 03:42:07 +0000 Subject: [PATCH 0008/4119] Migrating trove to entry points partially implements blueprint entrypoints-for-binscripts Change-Id: Iaafde0ab7f27598d566fc008fba7eddc582139c9 --- lib/trove | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/lib/trove b/lib/trove index 6d5a56e456..f8e3eddfe2 100644 --- a/lib/trove +++ b/lib/trove @@ -30,7 +30,13 @@ TROVECLIENT_DIR=$DEST/python-troveclient TROVE_CONF_DIR=/etc/trove TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove} -TROVE_BIN_DIR=/usr/local/bin + +# Support entry points installation of console scripts +if [[ -d $TROVE_DIR/bin ]]; then + TROVE_BIN_DIR=$TROVE_DIR/bin +else + TROVE_BIN_DIR=$(get_python_exec_prefix) +fi # setup_trove_logging() - Adds logging configuration to conf files function setup_trove_logging() { @@ -178,14 +184,14 @@ function init_trove() { recreate_database trove utf8 #Initialize the trove database - $TROVE_DIR/bin/trove-manage db_sync + $TROVE_BIN_DIR/trove-manage db_sync } # start_trove() - Start running processes, including screen function start_trove() { - screen_it tr-api "cd $TROVE_DIR; bin/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1" - screen_it tr-tmgr "cd $TROVE_DIR; bin/trove-taskmanager 
--config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1" - screen_it tr-cond "cd $TROVE_DIR; bin/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug 2>&1" + screen_it tr-api "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1" + screen_it tr-tmgr "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1" + screen_it tr-cond "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug 2>&1" } # stop_trove() - Stop running processes From 3ee52c81a12f1b823c1bc22e39d9f09a8d8b2ca8 Mon Sep 17 00:00:00 2001 From: Ben Nemec Date: Thu, 12 Dec 2013 19:26:12 +0000 Subject: [PATCH 0009/4119] Ensure hostname resolves correctly rabbitmq hangs on startup if the unqualified hostname for the system doesn't resolve properly. This change ensures that the hostname is added to /etc/hosts so that will never happen with devstack. Change-Id: I2c250f38f9feb18d1a59f3a457c6d01c1d98499c --- stack.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/stack.sh b/stack.sh index ce5fbd47e5..3ee6b1cde2 100755 --- a/stack.sh +++ b/stack.sh @@ -234,6 +234,13 @@ safe_chmod 0755 $DEST # a basic test for $DEST path permissions (fatal on error unless skipped) check_path_perm_sanity ${DEST} +# Certain services such as rabbitmq require that the local hostname resolves +# correctly. Make sure it exists in /etc/hosts so that is always true. +LOCAL_HOSTNAME=`hostname -s` +if [ -z "`grep ^127.0.0.1 /etc/hosts | grep $LOCAL_HOSTNAME`" ]; then + sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts +fi + # Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without # Internet access. ``stack.sh`` must have been previously run with Internet # access to install prerequisites and fetch repositories. 
From 00b434182e3c04976e03b94490359fa26e71ef69 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 2 Jan 2014 10:33:21 +0000 Subject: [PATCH 0010/4119] Handle more nicely when role root is already here When using postgresql we were handling the fallback if the role root was already here but this was still printing an error message, try to make it a bit smarter. Closes-Bug: #1265477 Change-Id: Ib3768dd182ab968e81038f900550f641b9a2af5c --- lib/databases/postgresql | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 519479ad68..60e5a33715 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -64,9 +64,13 @@ function configure_database_postgresql { sudo sed -i "/^host/s/all\s\+::1\/128\s\+ident/$DATABASE_USER\t::0\/0\tpassword/" $PG_HBA restart_service postgresql - # If creating the role fails, chances are it already existed. Try to alter it. - sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" || \ - sudo -u root sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" + # Create the role if it's not here or else alter it. + root_roles=$(sudo -u root sudo -u postgres -i psql -t -c "SELECT 'HERE' from pg_roles where rolname='root'") + if [[ ${root_roles} == *HERE ]];then + sudo -u root sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" + else + sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" + fi } function install_database_postgresql { From 0915e0c6bd9d9d370fbf05963704690580af62ec Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Thu, 2 Jan 2014 15:05:41 +0100 Subject: [PATCH 0011/4119] Add oslo.rootwrap to devstack gate oslo.rootwrap recently graduated but was not made part of the devstack-gate. 
This change is part of a series of changes affecting devstack-gate, config and devstack which will collectively fix this: https://review.openstack.org/#/q/status:open+topic:rootwrap-gate,n,z This should probably be merged once the config and devstack-gate changes are in, so that it can be self-testing. Change-Id: I7b1332c8004845a0dd76e27d871370d41d4524ac --- lib/oslo | 4 ++++ stackrc | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/lib/oslo b/lib/oslo index 816ae9a48a..f644ed76c3 100644 --- a/lib/oslo +++ b/lib/oslo @@ -22,6 +22,7 @@ set +o xtrace # -------- OSLOCFG_DIR=$DEST/oslo.config OSLOMSG_DIR=$DEST/oslo.messaging +OSLORWRAP_DIR=$DEST/oslo.rootwrap # Entry Points # ------------ @@ -37,6 +38,9 @@ function install_oslo() { git_clone $OSLOMSG_REPO $OSLOMSG_DIR $OSLOMSG_BRANCH setup_develop $OSLOMSG_DIR + + git_clone $OSLORWRAP_REPO $OSLORWRAP_DIR $OSLORWRAP_BRANCH + setup_develop $OSLORWRAP_DIR } # cleanup_oslo() - purge possibly old versions of oslo diff --git a/stackrc b/stackrc index 695bdb15d6..3fdc566ed2 100644 --- a/stackrc +++ b/stackrc @@ -136,6 +136,10 @@ OSLOCFG_BRANCH=${OSLOCFG_BRANCH:-master} OSLOMSG_REPO=${OSLOMSG_REPO:-${GIT_BASE}/openstack/oslo.messaging.git} OSLOMSG_BRANCH=${OSLOMSG_BRANCH:-master} +# oslo.rootwrap +OSLORWRAP_REPO=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git} +OSLORWRAP_BRANCH=${OSLORWRAP_BRANCH:-master} + # pbr drives the setuptools configs PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git} PBR_BRANCH=${PBR_BRANCH:-master} From 05952e3fcc6bdd9ccd1c7980e6a73c527711c08c Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Sun, 5 Jan 2014 07:59:06 -0800 Subject: [PATCH 0012/4119] Configuration changes required to support VMware NSX plugin (Formerly known as Nicira NVP plugin). Following Neutron change 79fbeb7ebebc0dfbe143aee96fbc250d1b9e7582, this patch introduces the new naming scheme for Neutron VMware NSX plugin configuration. 
Related-blueprint: nvp-third-part-support (aka bp vmware-nsx-third-party) Partial-implements blueprint: nicira-plugin-renaming Change-Id: If7790887661507bfdec6d2b97c0f99609039aa73 --- exercises/neutron-adv-test.sh | 4 +- lib/neutron_plugins/{nicira => vmware_nsx} | 77 ++++++++++--------- lib/neutron_thirdparty/{nicira => vmware_nsx} | 62 +++++++-------- 3 files changed, 72 insertions(+), 71 deletions(-) rename lib/neutron_plugins/{nicira => vmware_nsx} (59%) rename lib/neutron_thirdparty/{nicira => vmware_nsx} (50%) diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index 0a100c0fe8..0c0d42f458 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -400,10 +400,10 @@ main() { echo Description echo echo Copyright 2012, Cisco Systems - echo Copyright 2012, Nicira Networks, Inc. + echo Copyright 2012, VMware, Inc. echo Copyright 2012, NTT MCL, Inc. echo - echo Please direct any questions to dedutta@cisco.com, dan@nicira.com, nachi@nttmcl.com + echo Please direct any questions to dedutta@cisco.com, dwendlandt@vmware.com, nachi@nttmcl.com echo diff --git a/lib/neutron_plugins/nicira b/lib/neutron_plugins/vmware_nsx similarity index 59% rename from lib/neutron_plugins/nicira rename to lib/neutron_plugins/vmware_nsx index 87d3c3d17b..d506cb6f8d 100644 --- a/lib/neutron_plugins/nicira +++ b/lib/neutron_plugins/vmware_nsx @@ -1,5 +1,5 @@ -# Neutron Nicira NVP plugin -# --------------------------- +# Neutron VMware NSX plugin +# ------------------------- # Save trace setting MY_XTRACE=$(set +o | grep xtrace) @@ -9,10 +9,10 @@ source $TOP_DIR/lib/neutron_plugins/ovs_base function setup_integration_bridge() { _neutron_ovs_base_setup_bridge $OVS_BRIDGE - # Set manager to NVP controller (1st of list) - if [[ "$NVP_CONTROLLERS" != "" ]]; then + # Set manager to NSX controller (1st of list) + if [[ "$NSX_CONTROLLERS" != "" ]]; then # Get the first controller - controllers=(${NVP_CONTROLLERS//,/ }) + 
controllers=(${NSX_CONTROLLERS//,/ }) OVS_MGR_IP=${controllers[0]} else die $LINENO "Error - No controller specified. Unable to set a manager for OVS" @@ -21,7 +21,7 @@ function setup_integration_bridge() { } function is_neutron_ovs_base_plugin() { - # NVP uses OVS, but not the l3-agent + # NSX uses OVS, but not the l3-agent return 0 } @@ -33,14 +33,15 @@ function neutron_plugin_create_nova_conf() { } function neutron_plugin_install_agent_packages() { - # Nicira Plugin does not run q-agt, but it currently needs dhcp and metadata agents + # VMware NSX Plugin does not run q-agt, but it currently needs dhcp and metadata agents _neutron_ovs_base_install_agent_packages } function neutron_plugin_configure_common() { - Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nicira - Q_PLUGIN_CONF_FILENAME=nvp.ini - Q_DB_NAME="neutron_nvp" + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware + Q_PLUGIN_CONF_FILENAME=nsx.ini + Q_DB_NAME="neutron_nsx" + # TODO(armando-migliaccio): rename this once the code rename is complete Q_PLUGIN_CLASS="neutron.plugins.nicira.NeutronPlugin.NvpPluginV2" } @@ -57,76 +58,76 @@ function neutron_plugin_configure_dhcp_agent() { } function neutron_plugin_configure_l3_agent() { - # Nicira plugin does not run L3 agent - die $LINENO "q-l3 should must not be executed with Nicira plugin!" + # VMware NSX plugin does not run L3 agent + die $LINENO "q-l3 should must not be executed with VMware NSX plugin!" } function neutron_plugin_configure_plugin_agent() { - # Nicira plugin does not run L2 agent - die $LINENO "q-agt must not be executed with Nicira plugin!" + # VMware NSX plugin does not run L2 agent + die $LINENO "q-agt must not be executed with VMware NSX plugin!" 
} function neutron_plugin_configure_service() { if [[ "$MAX_LP_PER_BRIDGED_LS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nvp max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS + iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS fi if [[ "$MAX_LP_PER_OVERLAY_LS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nvp max_lp_per_overlay_ls $MAX_LP_PER_OVERLAY_LS + iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_overlay_ls $MAX_LP_PER_OVERLAY_LS fi if [[ "$FAILOVER_TIME" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nvp failover_time $FAILOVER_TIME + iniset /$Q_PLUGIN_CONF_FILE nsx failover_time $FAILOVER_TIME fi if [[ "$CONCURRENT_CONNECTIONS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nvp concurrent_connections $CONCURRENT_CONNECTIONS + iniset /$Q_PLUGIN_CONF_FILE nsx concurrent_connections $CONCURRENT_CONNECTIONS fi if [[ "$DEFAULT_TZ_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_tz_uuid $DEFAULT_TZ_UUID else - die $LINENO "The nicira plugin won't work without a default transport zone." + die $LINENO "The VMware NSX plugin won't work without a default transport zone." fi if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID Q_L3_ENABLED=True Q_L3_ROUTER_PER_TENANT=True - iniset /$Q_PLUGIN_CONF_FILE nvp metadata_mode access_network + iniset /$Q_PLUGIN_CONF_FILE nsx metadata_mode access_network fi if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID fi - # NVP_CONTROLLERS must be a comma separated string - if [[ "$NVP_CONTROLLERS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_controllers $NVP_CONTROLLERS + # NSX_CONTROLLERS must be a comma separated string + if [[ "$NSX_CONTROLLERS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_controllers $NSX_CONTROLLERS else - die $LINENO "The nicira plugin needs at least an NVP controller." 
+ die $LINENO "The VMware NSX plugin needs at least an NSX controller." fi - if [[ "$NVP_USER" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_user $NVP_USER + if [[ "$NSX_USER" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_user $NSX_USER fi - if [[ "$NVP_PASSWORD" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_password $NVP_PASSWORD + if [[ "$NSX_PASSWORD" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_password $NSX_PASSWORD fi - if [[ "$NVP_REQ_TIMEOUT" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT req_timeout $NVP_REQ_TIMEOUT + if [[ "$NSX_REQ_TIMEOUT" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT req_timeout $NSX_REQ_TIMEOUT fi - if [[ "$NVP_HTTP_TIMEOUT" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT http_timeout $NVP_HTTP_TIMEOUT + if [[ "$NSX_HTTP_TIMEOUT" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT http_timeout $NSX_HTTP_TIMEOUT fi - if [[ "$NVP_RETRIES" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT retries $NVP_RETRIES + if [[ "$NSX_RETRIES" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT retries $NSX_RETRIES fi - if [[ "$NVP_REDIRECTS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT redirects $NVP_REDIRECTS + if [[ "$NSX_REDIRECTS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT redirects $NSX_REDIRECTS fi if [[ "$AGENT_MODE" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nvp agent_mode $AGENT_MODE + iniset /$Q_PLUGIN_CONF_FILE nsx agent_mode $AGENT_MODE if [[ "$AGENT_MODE" == "agentless" ]]; then if [[ "$DEFAULT_SERVICE_CLUSTER_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_service_cluster_uuid $DEFAULT_SERVICE_CLUSTER_UUID else die $LINENO "Agentless mode requires a service cluster." 
fi - iniset /$Q_PLUGIN_CONF_FILE nvp_metadata metadata_server_address $Q_META_DATA_IP + iniset /$Q_PLUGIN_CONF_FILE nsx_metadata metadata_server_address $Q_META_DATA_IP fi fi } diff --git a/lib/neutron_thirdparty/nicira b/lib/neutron_thirdparty/vmware_nsx similarity index 50% rename from lib/neutron_thirdparty/nicira rename to lib/neutron_thirdparty/vmware_nsx index a24392cd4d..70d348274f 100644 --- a/lib/neutron_thirdparty/nicira +++ b/lib/neutron_thirdparty/vmware_nsx @@ -1,14 +1,14 @@ -# Nicira NVP +# VMware NSX # ---------- # This third-party addition can be used to configure connectivity between a DevStack instance -# and an NVP Gateway in dev/test environments. In order to use this correctly, the following +# and an NSX Gateway in dev/test environments. In order to use this correctly, the following # env variables need to be set (e.g. in your localrc file): # -# * enable_service nicira --> to execute this third-party addition +# * enable_service vmware_nsx --> to execute this third-party addition # * PUBLIC_BRIDGE --> bridge used for external connectivity, typically br-ex -# * NVP_GATEWAY_NETWORK_INTERFACE --> interface used to communicate with the NVP Gateway -# * NVP_GATEWAY_NETWORK_CIDR --> CIDR to configure br-ex, e.g. 172.24.4.211/24 +# * NSX_GATEWAY_NETWORK_INTERFACE --> interface used to communicate with the NSX Gateway +# * NSX_GATEWAY_NETWORK_CIDR --> CIDR to configure br-ex, e.g. 
172.24.4.211/24 # Save trace setting MY_XTRACE=$(set +o | grep xtrace) @@ -17,64 +17,64 @@ set +o xtrace # This is the interface that connects the Devstack instance # to an network that allows it to talk to the gateway for # testing purposes -NVP_GATEWAY_NETWORK_INTERFACE=${NVP_GATEWAY_NETWORK_INTERFACE:-eth2} -# Re-declare floating range as it's needed also in stop_nicira, which +NSX_GATEWAY_NETWORK_INTERFACE=${NSX_GATEWAY_NETWORK_INTERFACE:-eth2} +# Re-declare floating range as it's needed also in stop_vmware_nsx, which # is invoked by unstack.sh FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} -function configure_nicira() { +function configure_vmware_nsx() { : } -function init_nicira() { - if ! is_set NVP_GATEWAY_NETWORK_CIDR; then - NVP_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} +function init_vmware_nsx() { + if ! is_set NSX_GATEWAY_NETWORK_CIDR; then + NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address to set on br-ex was not specified. 
" - echo "Defaulting to "$NVP_GATEWAY_NETWORK_CIDR + echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR fi # Make sure the interface is up, but not configured - sudo ip link set $NVP_GATEWAY_NETWORK_INTERFACE up + sudo ip link set $NSX_GATEWAY_NETWORK_INTERFACE up # Save and then flush the IP addresses on the interface - addresses=$(ip addr show dev $NVP_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'}) - sudo ip addr flush $NVP_GATEWAY_NETWORK_INTERFACE - # Use the PUBLIC Bridge to route traffic to the NVP gateway + addresses=$(ip addr show dev $NSX_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'}) + sudo ip addr flush $NSX_GATEWAY_NETWORK_INTERFACE + # Use the PUBLIC Bridge to route traffic to the NSX gateway # NOTE(armando-migliaccio): if running in a nested environment this will work # only with mac learning enabled, portsecurity and security profiles disabled - # The public bridge might not exist for the NVP plugin if Q_USE_DEBUG_COMMAND is off + # The public bridge might not exist for the NSX plugin if Q_USE_DEBUG_COMMAND is off # Try to create it anyway sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE - sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_INTERFACE - nvp_gw_net_if_mac=$(ip link show $NVP_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}') - sudo ip link set address $nvp_gw_net_if_mac dev $PUBLIC_BRIDGE + sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_INTERFACE + nsx_gw_net_if_mac=$(ip link show $NSX_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}') + sudo ip link set address $nsx_gw_net_if_mac dev $PUBLIC_BRIDGE for address in $addresses; do sudo ip addr add dev $PUBLIC_BRIDGE $address done - sudo ip addr add dev $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_CIDR + sudo ip addr add dev $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_CIDR } -function install_nicira() { +function install_vmware_nsx() { : } -function start_nicira() { +function start_vmware_nsx() { : } -function 
stop_nicira() { - if ! is_set NVP_GATEWAY_NETWORK_CIDR; then - NVP_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} +function stop_vmware_nsx() { + if ! is_set NSX_GATEWAY_NETWORK_CIDR; then + NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address expected on br-ex was not specified. " - echo "Defaulting to "$NVP_GATEWAY_NETWORK_CIDR + echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR fi - sudo ip addr del $NVP_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE + sudo ip addr del $NSX_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE # Save and then flush remaining addresses on the interface addresses=$(ip addr show dev $PUBLIC_BRIDGE | grep inet | awk {'print $2'}) sudo ip addr flush $PUBLIC_BRIDGE # Try to detach physical interface from PUBLIC_BRIDGE - sudo ovs-vsctl del-port $NVP_GATEWAY_NETWORK_INTERFACE - # Restore addresses on NVP_GATEWAY_NETWORK_INTERFACE + sudo ovs-vsctl del-port $NSX_GATEWAY_NETWORK_INTERFACE + # Restore addresses on NSX_GATEWAY_NETWORK_INTERFACE for address in $addresses; do - sudo ip addr add dev $NVP_GATEWAY_NETWORK_INTERFACE $address + sudo ip addr add dev $NSX_GATEWAY_NETWORK_INTERFACE $address done } From 21fe4e76d5453a252e802c5d5f487f88b896decf Mon Sep 17 00:00:00 2001 From: Vincent Hou Date: Thu, 21 Nov 2013 03:10:27 -0500 Subject: [PATCH 0013/4119] Add a flexible API version choice for Cinder, Glance and Heat The version of the authentication url is set to v1.0 for some projects by default. We can make it configurable via the parameter "$IDENTITY_API_VERSION". 
Closes-Bug: #1253539 Change-Id: I6640e345d1317b1308403c95b13f8a998320241b --- lib/cinder | 2 +- lib/glance | 4 ++-- lib/heat | 2 +- lib/keystone | 8 ++++++++ 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/lib/cinder b/lib/cinder index cbe732e9b0..099cfda7fd 100644 --- a/lib/cinder +++ b/lib/cinder @@ -341,7 +341,7 @@ function configure_cinder() { -e 's/snapshot_autoextend_percent =.*/snapshot_autoextend_percent = 20/' \ /etc/lvm/lvm.conf fi - iniset $CINDER_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT + configure_API_version $CINDER_CONF $IDENTITY_API_VERSION iniset $CINDER_CONF keystone_authtoken admin_user cinder iniset $CINDER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $CINDER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD diff --git a/lib/glance b/lib/glance index 135136db7e..321174e619 100644 --- a/lib/glance +++ b/lib/glance @@ -83,7 +83,7 @@ function configure_glance() { iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL iniset $GLANCE_REGISTRY_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA - iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + configure_API_version $GLANCE_REGISTRY_CONF $IDENTITY_API_VERSION iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_password $SERVICE_PASSWORD @@ -101,7 +101,7 @@ function configure_glance() { iniset $GLANCE_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $GLANCE_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL iniset $GLANCE_API_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA - iniset $GLANCE_API_CONF keystone_authtoken 
auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + configure_API_version $GLANCE_API_CONF $IDENTITY_API_VERSION iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $GLANCE_API_CONF keystone_authtoken admin_user glance iniset $GLANCE_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD diff --git a/lib/heat b/lib/heat index e44a618162..59fd3d7a7a 100644 --- a/lib/heat +++ b/lib/heat @@ -95,7 +95,7 @@ function configure_heat() { iniset $HEAT_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST iniset $HEAT_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $HEAT_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $HEAT_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + configure_API_version $HEAT_CONF $IDENTITY_API_VERSION iniset $HEAT_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $HEAT_CONF keystone_authtoken admin_user heat diff --git a/lib/keystone b/lib/keystone index 29b9604efe..79f1fd9e84 100644 --- a/lib/keystone +++ b/lib/keystone @@ -335,6 +335,14 @@ create_keystone_accounts() { fi } +# Configure the API version for the OpenStack projects. +# configure_API_version conf_file version +function configure_API_version() { + local conf_file=$1 + local api_version=$2 + iniset $conf_file keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$api_version +} + # init_keystone() - Initialize databases, etc. function init_keystone() { if is_service_enabled ldap; then From 74103f2b3ffd047a4582ae9d37a057534cb6cce7 Mon Sep 17 00:00:00 2001 From: Nikhil Manchanda Date: Fri, 3 Jan 2014 13:53:14 -0800 Subject: [PATCH 0014/4119] Handle trove service availabilty in tempest. 
Partially implements blueprint: trove-tempest Change-Id: I5413a7afeffe670f6972b41d61dd27ed05da5ba2 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 95b300ce77..08c0553f03 100644 --- a/lib/tempest +++ b/lib/tempest @@ -329,7 +329,7 @@ function configure_tempest() { iniset $TEMPEST_CONF network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}" # service_available - for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna; do + for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna trove; do if is_service_enabled $service ; then iniset $TEMPEST_CONF service_available $service "True" else From def4c141f1d917705ac1dbdbfe7525f47382dada Mon Sep 17 00:00:00 2001 From: Kaitlin Farr Date: Mon, 6 Jan 2014 08:52:49 -0500 Subject: [PATCH 0015/4119] Adds default value for fixed_key Adds a default value for fixed_key, for use by a key manager implementation that reads the key from the configuration settings. This single, fixed key proffers no protection if the key is compromised. The current implementation of the key manager does not work correctly if the key is not set, so including this option is helpful for Tempest testing and volume encryption within DevStack. 
Implements: blueprint encrypt-cinder-volumes Change-Id: Id83060afc862c793b79b5429355b213cb4c173fd https://blueprints.launchpad.net/nova/+spec/encrypt-cinder-volumes --- stack.sh | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/stack.sh b/stack.sh index 2438f9fffc..558f71a3a4 100755 --- a/stack.sh +++ b/stack.sh @@ -1098,6 +1098,15 @@ if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nov iniset $NOVA_CONF DEFAULT s3_affix_tenant "True" fi +# Create a randomized default value for the keymgr's fixed_key +if is_service_enabled nova; then + FIXED_KEY="" + for i in $(seq 1 64); + do FIXED_KEY+=$(echo "obase=16; $(($RANDOM % 16))" | bc); + done; + iniset $NOVA_CONF keymgr fixed_key "$FIXED_KEY" +fi + if is_service_enabled zeromq; then echo_summary "Starting zermomq receiver" screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver" From 4ad37630a2f938b19697f6e310def046a4dcca48 Mon Sep 17 00:00:00 2001 From: Juan Manuel Olle Date: Mon, 6 Jan 2014 15:07:09 -0300 Subject: [PATCH 0016/4119] Remove duplicated name services Due to the fact that keystone will not allow services with duplicated names, cinder and nova services names were changed Closes-Bug: #1259425 Change-Id: I988aef477b418a289426e02e5e108aa57dd1076b --- lib/cinder | 2 +- lib/nova | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index cbe732e9b0..a0b33c8c88 100644 --- a/lib/cinder +++ b/lib/cinder @@ -385,7 +385,7 @@ create_cinder_accounts() { --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" CINDER_V2_SERVICE=$(keystone service-create \ - --name=cinder \ + --name=cinderv2 \ --type=volumev2 \ --description="Cinder Volume Service V2" \ | grep " id " | get_field 2) diff --git a/lib/nova b/lib/nova index e754341bad..e9f87fce1f 100644 --- a/lib/nova +++ b/lib/nova @@ 
-338,7 +338,7 @@ create_nova_accounts() { --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" NOVA_V3_SERVICE=$(keystone service-create \ - --name=nova \ + --name=novav3 \ --type=computev3 \ --description="Nova Compute Service V3" \ | grep " id " | get_field 2) From 085abd8eb7c744170cd92429b9aea9d07fd4458b Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 11 Dec 2013 12:21:12 +0000 Subject: [PATCH 0017/4119] Fix xenapi functions' tests The tests got outdated, this fix makes the tests pass again. Change-Id: Iadddfbf34bf79ba455811645e766c2f3d0fcca84 --- tools/xen/mocks | 2 +- tools/xen/test_functions.sh | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/tools/xen/mocks b/tools/xen/mocks index 94b0ca4d02..ec8679e816 100644 --- a/tools/xen/mocks +++ b/tools/xen/mocks @@ -73,7 +73,7 @@ function [ { done return 1 fi - echo "Mock test does not implement the requested function" + echo "Mock test does not implement the requested function: ${1:-}" exit 1 } diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh index 0ae2cb7f9a..14551868e1 100755 --- a/tools/xen/test_functions.sh +++ b/tools/xen/test_functions.sh @@ -118,7 +118,7 @@ function test_zip_snapshot_location { function test_create_directory_for_kernels { ( . mocks - mock_out get_local_sr uuid1 + mock_out get_local_sr_path /var/run/sr-mount/uuid1 create_directory_for_kernels ) @@ -141,7 +141,7 @@ EOF function test_create_directory_for_images { ( . 
mocks - mock_out get_local_sr uuid1 + mock_out get_local_sr_path /var/run/sr-mount/uuid1 create_directory_for_images ) @@ -199,8 +199,7 @@ function test_get_local_sr { [ "$RESULT" == "uuid123" ] - assert_xe_min - assert_xe_param "sr-list" "name-label=Local storage" + assert_xe_param "pool-list" params=default-SR minimal=true } function test_get_local_sr_path { From 2781f3bfc3e0ceca29457f65adfddb63f01d8059 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 11 Dec 2013 13:41:54 +0000 Subject: [PATCH 0018/4119] Workaround missing zip snapshot At the moment, xenserver installation depends on github snapshots. Unfortunately, git.openstack.org does not have that capability. This fix includes: - Exit with error code, if a download fails - create proper urls, even if they are using the git protocol - set git base to github - so we are able to do snapshots Fixes bug: 1259905 Change-Id: I8d0cf8bf8abb16ee0a4b138a6719409c75e7a146 --- tools/xen/README.md | 3 +++ tools/xen/functions | 15 +++++++++++++-- tools/xen/mocks | 6 +++++- tools/xen/test_functions.sh | 21 +++++++++++++++++---- 4 files changed, 38 insertions(+), 7 deletions(-) diff --git a/tools/xen/README.md b/tools/xen/README.md index 06192ed2b7..ee1abcc091 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -70,6 +70,9 @@ the `XENAPI_PASSWORD` must be your dom0 root password. Of course, use real passwords if this machine is exposed. 
cat > ./localrc <&2 + exit 1 +} + function xapi_plugin_location { for PLUGIN_DIR in "/etc/xapi.d/plugins/" "/usr/lib/xcp/plugins/" "/usr/lib/xapi/plugins"; do if [ -d $PLUGIN_DIR ]; then @@ -11,7 +20,7 @@ function xapi_plugin_location { } function zip_snapshot_location { - echo $1 | sed "s:\.git$::;s:$:/zipball/$2:g" + echo $1 | sed "s,^git://,http://,g;s:\.git$::;s:$:/zipball/$2:g" } function create_directory_for_kernels { @@ -41,7 +50,9 @@ function extract_remote_zipball { local EXTRACTED_FILES=$(mktemp -d) { - wget -nv $ZIPBALL_URL -O $LOCAL_ZIPBALL --no-check-certificate + if ! wget -nv $ZIPBALL_URL -O $LOCAL_ZIPBALL --no-check-certificate; then + die_with_error "Failed to download [$ZIPBALL_URL]" + fi unzip -q -o $LOCAL_ZIPBALL -d $EXTRACTED_FILES rm -f $LOCAL_ZIPBALL } >&2 diff --git a/tools/xen/mocks b/tools/xen/mocks index ec8679e816..3b9b05c747 100644 --- a/tools/xen/mocks +++ b/tools/xen/mocks @@ -35,7 +35,7 @@ function mktemp { function wget { if [[ $@ =~ "failurl" ]]; then - exit 1 + return 1 fi echo "wget $@" >> $LIST_OF_ACTIONS } @@ -77,6 +77,10 @@ function [ { exit 1 } +function die_with_error { + echo "$1" >> $DEAD_MESSAGES +} + function xe { cat $XE_RESPONSE { diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh index 14551868e1..373d996760 100755 --- a/tools/xen/test_functions.sh +++ b/tools/xen/test_functions.sh @@ -29,6 +29,9 @@ function before_each_test { XE_CALLS=$(mktemp) truncate -s 0 $XE_CALLS + + DEAD_MESSAGES=$(mktemp) + truncate -s 0 $DEAD_MESSAGES } # Teardown @@ -64,6 +67,10 @@ function assert_xe_param { grep -qe "^$1\$" $XE_CALLS } +function assert_died_with { + diff -u <(echo "$1") $DEAD_MESSAGES +} + function mock_out { local FNNAME="$1" local OUTPUT="$2" @@ -109,10 +116,16 @@ function test_no_plugin_directory_found { grep "[ -d /usr/lib/xcp/plugins/ ]" $LIST_OF_ACTIONS } -function test_zip_snapshot_location { +function test_zip_snapshot_location_http { diff \ - <(zip_snapshot_location 
"git://git.openstack.org/openstack/nova.git" "master") \ - <(echo "git://git.openstack.org/openstack/nova/zipball/master") + <(zip_snapshot_location "http://github.com/openstack/nova.git" "master") \ + <(echo "http://github.com/openstack/nova/zipball/master") +} + +function test_zip_snapsot_location_git { + diff \ + <(zip_snapshot_location "git://github.com/openstack/nova.git" "master") \ + <(echo "http://github.com/openstack/nova/zipball/master") } function test_create_directory_for_kernels { @@ -179,7 +192,7 @@ function test_extract_remote_zipball_wget_fail { local IGNORE IGNORE=$(. mocks && extract_remote_zipball "failurl") - assert_previous_command_failed + assert_died_with "Failed to download [failurl]" } function test_find_nova_plugins { From f93b98ac7309e3ebd106b44843650a161fad4616 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Wed, 8 Jan 2014 18:15:14 +0900 Subject: [PATCH 0019/4119] gitignore: add .localrc.auto and local.conf The changeset of 893e66360caf3bcf0578d4541b3c17d089c33b02, Change-Id of I367cadc86116621e9574ac203aafdab483d810d3 introduced local.conf and generates .localrc.auto. But they aren't in .gitignore. This patch adds them into .gitignore. Change-Id: I7d4dc99d980d9c5b5156cf915646bc96163a3dc4 Closes-Bug: #1267027 --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index c49b4a3287..43652024f3 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,5 @@ accrc devstack-docs-* docs/ docs-files +.localrc.auto +local.conf From 96f8e34c38f172689f09842761dd20600a60fc5a Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Mon, 9 Sep 2013 14:22:07 -0700 Subject: [PATCH 0020/4119] Enable multi-threaded nova-conductor Just like I09f4c6f57e71982b8c7fc92645b3ebec12ff1348, enable multi-threaded nova-conductor. This feature was merged into nova in I8698997d211d7617ee14a1c6113056a694d70620. 
Change-Id: Id7042284e81bd64092a400d24a3170ce07beb08c --- lib/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova b/lib/nova index e754341bad..39685a835a 100644 --- a/lib/nova +++ b/lib/nova @@ -377,6 +377,7 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT osapi_compute_workers "4" iniset $NOVA_CONF DEFAULT ec2_workers "4" iniset $NOVA_CONF DEFAULT metadata_workers "4" + iniset $NOVA_CONF conductor workers "4" iniset $NOVA_CONF DEFAULT sql_connection `database_connection_url nova` iniset $NOVA_CONF DEFAULT fatal_deprecations "True" iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" From 25049cd23de0e8055326c668ff119dd8cdf0bae4 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Thu, 9 Jan 2014 13:53:52 +0100 Subject: [PATCH 0021/4119] Use --tenant-id, not --tenant_id Change-Id: I0e3d65d5b69ac82cbf7ee6ffc41ead369af8c126 --- lib/cinder | 2 +- lib/ironic | 4 ++-- lib/marconi | 2 +- lib/neutron | 14 +++++++------- lib/nova | 2 +- lib/savanna | 2 +- lib/swift | 2 +- lib/trove | 2 +- 8 files changed, 15 insertions(+), 15 deletions(-) diff --git a/lib/cinder b/lib/cinder index cbe732e9b0..fe278f60bb 100644 --- a/lib/cinder +++ b/lib/cinder @@ -365,7 +365,7 @@ create_cinder_accounts() { CINDER_USER=$(keystone user-create \ --name=cinder \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=cinder@example.com \ | grep " id " | get_field 2) keystone user-role-add \ diff --git a/lib/ironic b/lib/ironic index 099746ae22..1ff3c81f06 100644 --- a/lib/ironic +++ b/lib/ironic @@ -149,11 +149,11 @@ create_ironic_accounts() { IRONIC_USER=$(keystone user-create \ --name=ironic \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=ironic@example.com \ | grep " id " | get_field 2) keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --user_id $IRONIC_USER \ --role_id $ADMIN_ROLE if [[ 
"$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then diff --git a/lib/marconi b/lib/marconi index 742f866e7d..6b9ffdc0b3 100644 --- a/lib/marconi +++ b/lib/marconi @@ -142,7 +142,7 @@ function create_marconi_accounts() { MARCONI_USER=$(get_id keystone user-create --name=marconi \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=marconi@example.com) keystone user-role-add --tenant-id $SERVICE_TENANT \ --user-id $MARCONI_USER \ diff --git a/lib/neutron b/lib/neutron index a7519ad328..43f43f951a 100644 --- a/lib/neutron +++ b/lib/neutron @@ -328,7 +328,7 @@ function create_neutron_accounts() { NEUTRON_USER=$(keystone user-create \ --name=neutron \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=neutron@example.com \ | grep " id " | get_field 2) keystone user-role-add \ @@ -357,7 +357,7 @@ function create_neutron_initial_network() { # Create a small network # Since neutron command is executed in admin context at this point, - # ``--tenant_id`` needs to be specified. + # ``--tenant-id`` needs to be specified. if is_baremetal; then if [[ "$PUBLIC_INTERFACE" == '' || "$OVS_PHYSICAL_BRIDGE" == '' ]]; then die $LINENO "Neutron settings for baremetal not set.. 
exiting" @@ -367,16 +367,16 @@ function create_neutron_initial_network() { sudo ip addr del $IP dev $PUBLIC_INTERFACE sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE done - NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2) + NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant-id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID" - SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + SUBNET_ID=$(neutron subnet-create --tenant-id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $TENANT_ID" sudo ifconfig $OVS_PHYSICAL_BRIDGE up sudo route add default gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE else - NET_ID=$(neutron net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + NET_ID=$(neutron net-create --tenant-id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID" - SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + SUBNET_ID=$(neutron subnet-create --tenant-id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for 
$TENANT_ID" fi @@ -384,7 +384,7 @@ function create_neutron_initial_network() { # Create a router, and add the private subnet as one of its interfaces if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then # create a tenant-owned router. - ROUTER_ID=$(neutron router-create --tenant_id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2) + ROUTER_ID=$(neutron router-create --tenant-id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2) die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $TENANT_ID $Q_ROUTER_NAME" else # Plugin only supports creating a single router, which should be admin owned. diff --git a/lib/nova b/lib/nova index e754341bad..367ec83072 100644 --- a/lib/nova +++ b/lib/nova @@ -318,7 +318,7 @@ create_nova_accounts() { NOVA_USER=$(keystone user-create \ --name=nova \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=nova@example.com \ | grep " id " | get_field 2) keystone user-role-add \ diff --git a/lib/savanna b/lib/savanna index 6794e36dfd..bb4dfe693d 100644 --- a/lib/savanna +++ b/lib/savanna @@ -56,7 +56,7 @@ function create_savanna_accounts() { SAVANNA_USER=$(keystone user-create \ --name=savanna \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=savanna@example.com \ | grep " id " | get_field 2) keystone user-role-add \ diff --git a/lib/swift b/lib/swift index 96929db557..44c230be93 100644 --- a/lib/swift +++ b/lib/swift @@ -514,7 +514,7 @@ function create_swift_accounts() { ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") SWIFT_USER=$(keystone user-create --name=swift --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT --email=swift@example.com | grep " id " | get_field 2) + --tenant-id $SERVICE_TENANT --email=swift@example.com | grep " id " | get_field 2) keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $SWIFT_USER --role-id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then diff 
--git a/lib/trove b/lib/trove index f8e3eddfe2..4efdb5d669 100644 --- a/lib/trove +++ b/lib/trove @@ -64,7 +64,7 @@ create_trove_accounts() { TROVE_USER=$(keystone user-create \ --name=trove \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=trove@example.com \ | grep " id " | get_field 2) keystone user-role-add --tenant-id $SERVICE_TENANT \ From 72dc98ed6bcdaa1cdd81c1b655b5cbdf5490291d Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Thu, 9 Jan 2014 21:57:22 +0900 Subject: [PATCH 0022/4119] Correct Qpid package name in files/apts/neutron Ubuntu qpid server package is named as "qpidd", but files/apts/neutron has an entry "qpid". Change-Id: Ie3f8391a7404bdeb222acfcce77ca80a14ea8693 Closes-Bug: #1267459 --- files/apts/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/apts/neutron b/files/apts/neutron index 0f4b69f8ef..4e9f0f7dfd 100644 --- a/files/apts/neutron +++ b/files/apts/neutron @@ -20,6 +20,6 @@ python-qpid # dist:precise dnsmasq-base dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise,quantal rabbitmq-server # NOPRIME -qpid # NOPRIME +qpidd # NOPRIME sqlite3 vlan From fa5ccfff1098bb85eb7810ad5146fbdfee83fb15 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Thu, 9 Jan 2014 13:27:35 +0100 Subject: [PATCH 0023/4119] Setup Keystone catalog information for Ceilometer Change-Id: I3f536f38fe7862ee41b06d1d48b848cc07492c8d Closes-Bug: #1267322 --- files/default_catalog.templates | 5 ++++ lib/ceilometer | 42 +++++++++++++++++++++++++++++++++ stack.sh | 4 ++++ 3 files changed, 51 insertions(+) diff --git a/files/default_catalog.templates b/files/default_catalog.templates index 277904a8e3..430c42a337 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -51,3 +51,8 @@ catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8000/v1 catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8000/v1 
catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8000/v1 catalog.RegionOne.orchestration.name = Heat Service + +catalog.RegionOne.metering.publicURL = http://%SERVICE_HOST%:8777/v1 +catalog.RegionOne.metering.adminURL = http://%SERVICE_HOST%:8777/v1 +catalog.RegionOne.metering.internalURL = http://%SERVICE_HOST%:8777/v1 +catalog.RegionOne.metering.name = Telemetry Service diff --git a/lib/ceilometer b/lib/ceilometer index fac3be14a9..fe72fcdb11 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -48,8 +48,50 @@ CEILOMETER_BIN_DIR=$(get_python_exec_prefix) # Set up database backend CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql} +# Ceilometer connection info. +CEILOMETER_SERVICE_PROTOCOL=http +CEILOMETER_SERVICE_HOST=$SERVICE_HOST +CEILOMETER_SERVICE_PORT=${CEILOMETER_SERVICE_PORT:-8777} +# + # Functions # --------- +# +# create_ceilometer_accounts() - Set up common required ceilometer accounts + +create_ceilometer_accounts() { + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + # Ceilometer + if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then + CEILOMETER_USER=$(keystone user-create \ + --name=ceilometer \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=ceilometer@example.com \ + | grep " id " | get_field 2) + keystone user-role-add \ + --tenant-id $SERVICE_TENANT \ + --user-id $CEILOMETER_USER \ + --role-id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + CEILOMETER_SERVICE=$(keystone service-create \ + --name=ceilometer \ + --type=metering \ + --description="OpenStack Telemetry Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $CEILOMETER_SERVICE \ + --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ + --adminurl 
"$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ + --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" + fi + fi +} + # cleanup_ceilometer() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up diff --git a/stack.sh b/stack.sh index 2438f9fffc..bf782bc047 100755 --- a/stack.sh +++ b/stack.sh @@ -901,6 +901,10 @@ if is_service_enabled key; then create_trove_accounts fi + if is_service_enabled ceilometer; then + create_ceilometer_accounts + fi + if is_service_enabled swift || is_service_enabled s-proxy; then create_swift_accounts fi From 6681a4fae9df92cee77900f2248b8e98c501626f Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 10 Jan 2014 15:28:29 +0900 Subject: [PATCH 0024/4119] bash8: fix bash8 warning This patch removes the following bash8 warnings. > /devstack/ $ ./run_tests.sh > Running bash8... > E003: Indent not multiple of 4: ' wget -c $image_url -O $FILES/$IMAGE_FNAME' > - functions: L1367 > E003: Indent not multiple of 4: ' if [[ $? 
-ne 0 ]]; then' > - functions: L1368 > E003: Indent not multiple of 4: ' echo "Not found: $image_url"' > - functions: L1369 > E003: Indent not multiple of 4: ' return' > - functions: L1370 > E003: Indent not multiple of 4: ' fi' > - functions: L1371 > E003: Indent not multiple of 4: ' `"should use a descriptor-data pair."' > - functions: L1423 > E003: Indent not multiple of 4: ' `" Attempt to retrieve the *-flat.vmdk: $flat_url"' > - functions: L1438 > E003: Indent not multiple of 4: ' `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"' > - functions: L1477 > E003: Indent not multiple of 4: ' warn $LINENO "Descriptor not found $descriptor_url"' > - functions: L1492 > E003: Indent not multiple of 4: ' descriptor_found=false' > - functions: L1493 > E003: Indent not multiple of 4: ' fi' > - functions: L1501 > E003: Indent not multiple of 4: ' fi' > - functions: L1502 > E003: Indent not multiple of 4: ' #TODO(alegendre): handle streamOptimized once supported by the VMware driver.' > - functions: L1503 > E003: Indent not multiple of 4: ' vmdk_disktype="preallocated"' > - functions: L1504 > 14 bash8 error(s) found Change-Id: Icf2cddf283192a50253ccfa697c2d32eec75b4ba Closes-Bug: #1267716 --- functions | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/functions b/functions index e79e1d58af..6f09685efb 100644 --- a/functions +++ b/functions @@ -1364,11 +1364,11 @@ function upload_image() { if [[ $image_url != file* ]]; then # Downloads the image (uec ami+aki style), then extracts it. if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then - wget -c $image_url -O $FILES/$IMAGE_FNAME - if [[ $? -ne 0 ]]; then - echo "Not found: $image_url" - return - fi + wget -c $image_url -O $FILES/$IMAGE_FNAME + if [[ $? 
-ne 0 ]]; then + echo "Not found: $image_url" + return + fi fi IMAGE="$FILES/${IMAGE_FNAME}" else @@ -1420,7 +1420,7 @@ function upload_image() { vmdk_create_type="${vmdk_create_type%?}" descriptor_data_pair_msg="Monolithic flat and VMFS disks "` - `"should use a descriptor-data pair." + `"should use a descriptor-data pair." if [[ "$vmdk_create_type" = "monolithicSparse" ]]; then vmdk_disktype="sparse" elif [[ "$vmdk_create_type" = "monolithicFlat" || \ @@ -1435,7 +1435,7 @@ function upload_image() { path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` flat_url="${image_url:0:$path_len}$flat_fname" warn $LINENO "$descriptor_data_pair_msg"` - `" Attempt to retrieve the *-flat.vmdk: $flat_url" + `" Attempt to retrieve the *-flat.vmdk: $flat_url" if [[ $flat_url != file* ]]; then if [[ ! -f $FILES/$flat_fname || \ "$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then @@ -1474,7 +1474,7 @@ function upload_image() { flat_path="${image_url:0:$path_len}" descriptor_url=$flat_path$descriptor_fname warn $LINENO "$descriptor_data_pair_msg"` - `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" + `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" if [[ $flat_path != file* ]]; then if [[ ! -f $FILES/$descriptor_fname || \ "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then @@ -1489,8 +1489,8 @@ function upload_image() { descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g") if [[ ! -f $descriptor_url || \ "$(stat -c "%s" $descriptor_url)" == "0" ]]; then - warn $LINENO "Descriptor not found $descriptor_url" - descriptor_found=false + warn $LINENO "Descriptor not found $descriptor_url" + descriptor_found=false fi fi if $descriptor_found; then @@ -1498,10 +1498,10 @@ function upload_image() { `"grep -a -F -m 1 'ddb.adapterType =' $descriptor_url)" vmdk_adapter_type="${vmdk_adapter_type#*\"}" vmdk_adapter_type="${vmdk_adapter_type%?}" - fi - fi - #TODO(alegendre): handle streamOptimized once supported by the VMware driver. 
- vmdk_disktype="preallocated" + fi + fi + #TODO(alegendre): handle streamOptimized once supported by the VMware driver. + vmdk_disktype="preallocated" else #TODO(alegendre): handle streamOptimized once supported by the VMware driver. vmdk_disktype="preallocated" From d7f6090f29786f091773497bc3597142d94619ec Mon Sep 17 00:00:00 2001 From: Alvaro Lopez Ortega Date: Sun, 22 Dec 2013 17:03:47 +0100 Subject: [PATCH 0025/4119] Add support for Fedora 20 The list of RPM packages has been updated to support the recently released Fedora 20 distribution. Closes-Bug: #1263291 Co-Authored: Alvaro Lopez Ortega Change-Id: Ia66abef1a1a54e6d5ee6eebc12908cef3f1d211d --- files/rpms/cinder | 1 + files/rpms/general | 1 + files/rpms/glance | 5 +++-- files/rpms/horizon | 4 ++-- files/rpms/keystone | 10 +++++----- files/rpms/neutron | 4 ++-- files/rpms/nova | 6 +++--- files/rpms/swift | 2 +- files/rpms/tempest | 2 +- files/rpms/trove | 2 +- stack.sh | 4 ++-- 11 files changed, 22 insertions(+), 19 deletions(-) diff --git a/files/rpms/cinder b/files/rpms/cinder index c4edb68f14..623c13e676 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -4,3 +4,4 @@ qemu-img python-devel postgresql-devel iscsi-initiator-utils +python-lxml #dist:f18,f19,f20 diff --git a/files/rpms/general b/files/rpms/general index 2db31d1db0..40246ea4ab 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -20,6 +20,7 @@ tar tcpdump unzip wget +which # [1] : some of installed tools have unversioned dependencies on this, # but others have versioned (<=0.7).
So if a later version (0.7.1) diff --git a/files/rpms/glance b/files/rpms/glance index dd66171f7a..fffd9c85b4 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -1,6 +1,6 @@ gcc libffi-devel # testonly -libxml2-devel +libxml2-devel # testonly libxslt-devel # testonly mysql-devel # testonly openssl-devel # testonly @@ -9,7 +9,8 @@ python-argparse python-devel python-eventlet python-greenlet -python-paste-deploy #dist:f16,f17,f18,f19 +python-lxml #dist:f18,f19,f20 +python-paste-deploy #dist:f18,f19,f20 python-routes python-sqlalchemy python-wsgiref diff --git a/files/rpms/horizon b/files/rpms/horizon index aa27ab4e97..59503cc9aa 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -16,8 +16,8 @@ python-kombu python-migrate python-mox python-nose -python-paste #dist:f16,f17,f18,f19 -python-paste-deploy #dist:f16,f17,f18,f19 +python-paste #dist:f18,f19,f20 +python-paste-deploy #dist:f18,f19,f20 python-routes python-sphinx python-sqlalchemy diff --git a/files/rpms/keystone b/files/rpms/keystone index 52dbf477d8..99e8524628 100644 --- a/files/rpms/keystone +++ b/files/rpms/keystone @@ -1,11 +1,11 @@ python-greenlet -python-lxml #dist:f16,f17,f18,f19 -python-paste #dist:f16,f17,f18,f19 -python-paste-deploy #dist:f16,f17,f18,f19 -python-paste-script #dist:f16,f17,f18,f19 +libxslt-devel # dist:f20 +python-lxml #dist:f18,f19,f20 +python-paste #dist:f18,f19,f20 +python-paste-deploy #dist:f18,f19,f20 +python-paste-script #dist:f18,f19,f20 python-routes python-sqlalchemy -python-sqlite2 python-webob sqlite diff --git a/files/rpms/neutron b/files/rpms/neutron index a7700f77d4..67bf52350a 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -11,8 +11,8 @@ python-greenlet python-iso8601 python-kombu #rhel6 gets via pip -python-paste # dist:f16,f17,f18,f19 -python-paste-deploy # dist:f16,f17,f18,f19 +python-paste # dist:f18,f19,f20 +python-paste-deploy # dist:f18,f19,f20 python-qpid python-routes python-sqlalchemy diff --git a/files/rpms/nova b/files/rpms/nova 
index c99f3defc8..ac70ac5d6f 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -28,11 +28,11 @@ python-kombu python-lockfile python-migrate python-mox -python-paramiko # dist:f16,f17,f18,f19 +python-paramiko # dist:f18,f19,f20 # ^ on RHEL, brings in python-crypto which conflicts with version from # pip we need -python-paste # dist:f16,f17,f18,f19 -python-paste-deploy # dist:f16,f17,f18,f19 +python-paste # dist:f18,f19,f20 +python-paste-deploy # dist:f18,f19,f20 python-qpid python-routes python-sqlalchemy diff --git a/files/rpms/swift b/files/rpms/swift index b137f30dce..32432bca9b 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -9,7 +9,7 @@ python-eventlet python-greenlet python-netifaces python-nose -python-paste-deploy # dist:f16,f17,f18,f19 +python-paste-deploy # dist:f18,f19,f20 python-simplejson python-webob pyxattr diff --git a/files/rpms/tempest b/files/rpms/tempest index de32b81504..e7bbd43cd6 100644 --- a/files/rpms/tempest +++ b/files/rpms/tempest @@ -1 +1 @@ -libxslt-dev \ No newline at end of file +libxslt-devel diff --git a/files/rpms/trove b/files/rpms/trove index 09dcee8104..c5cbdea012 100644 --- a/files/rpms/trove +++ b/files/rpms/trove @@ -1 +1 @@ -libxslt1-dev # testonly +libxslt-devel # testonly diff --git a/stack.sh b/stack.sh index ce5fbd47e5..4e12c45523 100755 --- a/stack.sh +++ b/stack.sh @@ -12,7 +12,7 @@ # developer install. # To keep this script simple we assume you are running on a recent **Ubuntu** -# (12.04 Precise or newer) or **Fedora** (F16 or newer) machine. (It may work +# (12.04 Precise or newer) or **Fedora** (F18 or newer) machine. (It may work # on other platforms but support for those platforms is left to those who added # them to DevStack.) It should work in a VM or physical server. 
Additionally # we maintain a list of ``apt`` and ``rpm`` dependencies and other configuration @@ -131,7 +131,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f16|f17|f18|f19|opensuse-12.2|rhel6) ]]; then +if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f18|f19|f20|opensuse-12.2|rhel6) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 1b0eccdf75cf70a26c1b2ae6b9beaa75ebaf7a6a Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Fri, 10 Jan 2014 11:51:01 +0100 Subject: [PATCH 0026/4119] Fix Heat/Cloud formation catalog template Cloud formation and Heat API ports where mixed. Change-Id: I029592c4821bb93c8a1dd91519f30908efd56627 Closes-Bug: #1267355 --- files/default_catalog.templates | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/files/default_catalog.templates b/files/default_catalog.templates index 277904a8e3..debcedfb5b 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -47,7 +47,12 @@ catalog.RegionOne.image.adminURL = http://%SERVICE_HOST%:9292 catalog.RegionOne.image.internalURL = http://%SERVICE_HOST%:9292 catalog.RegionOne.image.name = Image Service -catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8000/v1 -catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8000/v1 -catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8000/v1 +catalog.RegionOne.cloudformation.publicURL = http://%SERVICE_HOST%:8000/v1 +catalog.RegionOne.cloudformation.adminURL = http://%SERVICE_HOST%:8000/v1 +catalog.RegionOne.cloudformation.internalURL = http://%SERVICE_HOST%:8000/v1 
+catalog.RegionOne.cloudformation.name = Heat CloudFormation Service + +catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s +catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s +catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s catalog.RegionOne.orchestration.name = Heat Service From f69c6f16d21ce51eb5939ea6fecd99a8b28b426b Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 9 Jan 2014 19:47:54 -0500 Subject: [PATCH 0027/4119] Enable server-side and client-side logs for libvirt Need this to diagnose libvirt Errors in the gate Change-Id: Id46137a71d17abc8bfab66b14ab567d81a31f018 Related-Bug: #1254872 --- lib/nova | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/nova b/lib/nova index e754341bad..162212da59 100644 --- a/lib/nova +++ b/lib/nova @@ -648,6 +648,14 @@ function start_nova_compute() { fi if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + # Enable client side traces for libvirt + export LIBVIRT_LOG_FILTERS="1:libvirt" + export LIBVIRT_LOG_OUTPUTS="1:file:/var/log/libvirt/libvirtd-nova.log" + + # Enable server side traces for libvirtd + echo "log_filters=\"1:libvirt 1:qemu\"" | sudo tee -a /etc/libvirt/libvirtd.conf + echo "log_outputs=\"1:file:/var/log/libvirt/libvirtd.log\"" | sudo tee -a /etc/libvirt/libvirtd.conf + # The group **$LIBVIRT_GROUP** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'" From 63e1784354a49ca45bb4ae9465d2cb6dfb31db12 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Fri, 10 Jan 2014 14:23:03 +0100 Subject: [PATCH 0028/4119] Put cinder rootwrap config in separate function Separate out Cinder's rootwrap configuration so that it can be called from Grenade's upgrade scripts. 
This follows the same model as Nova uses with configure_nova_rootwrap() which can be called from Grenade to refresh rootwrap config. Change-Id: Id808abc2b5754443362b3de4b3453e305d3720f3 --- lib/cinder | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/lib/cinder b/lib/cinder index cbe732e9b0..6f5fb188c9 100644 --- a/lib/cinder +++ b/lib/cinder @@ -163,15 +163,8 @@ function cleanup_cinder() { fi } -# configure_cinder() - Set config files, create data dirs, etc -function configure_cinder() { - if [[ ! -d $CINDER_CONF_DIR ]]; then - sudo mkdir -p $CINDER_CONF_DIR - fi - sudo chown $STACK_USER $CINDER_CONF_DIR - - cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR - +# configure_cinder_rootwrap() - configure Cinder's rootwrap +function configure_cinder_rootwrap() { # Set the paths of certain binaries CINDER_ROOTWRAP=$(get_rootwrap_location cinder) if [[ ! -x $CINDER_ROOTWRAP ]]; then @@ -214,6 +207,18 @@ function configure_cinder() { chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap +} + +# configure_cinder() - Set config files, create data dirs, etc +function configure_cinder() { + if [[ ! 
-d $CINDER_CONF_DIR ]]; then + sudo mkdir -p $CINDER_CONF_DIR + fi + sudo chown $STACK_USER $CINDER_CONF_DIR + + cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR + + configure_cinder_rootwrap cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI From 9fc8792b0ac7525b4c353b0a55b8b80eabf76e2a Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 22 May 2013 17:19:06 -0500 Subject: [PATCH 0029/4119] Robustify service shutdown * Save PID when using screen in screen_it() * Add screen_stop() * Call out service stop_*() in unstack.sh functions so screen_stop() can do its thing Closes-bug: 1183449 Change-Id: Iac84231cfda960c4197de5b6e8ba6eb19225169a --- functions | 33 +++++++++++++++++++++++++++++++-- lib/ceilometer | 2 +- lib/cinder | 2 +- lib/glance | 4 ++-- lib/heat | 2 +- lib/keystone | 2 +- lib/nova | 2 +- lib/trove | 2 +- stackrc | 3 +++ unstack.sh | 47 ++++++++++++++++++++++++++++++++++------------- 10 files changed, 76 insertions(+), 23 deletions(-) diff --git a/functions b/functions index 6f09685efb..92b61ed974 100644 --- a/functions +++ b/functions @@ -1132,10 +1132,39 @@ function screen_it { sleep 1.5 NL=`echo -ne '\015'` - screen -S $SCREEN_NAME -p $1 -X stuff "$2 || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" + # This fun command does the following: + # - the passed server command is backgrounded + # - the pid of the background process is saved in the usual place + # - the server process is brought back to the foreground + # - if the server process exits prematurely the fg command errors + # and a message is written to stdout and the service failure file + # The pid saved can be used in screen_stop() as a process group + # id to kill off all child processes + screen -S $SCREEN_NAME -p $1 -X stuff "$2 & echo \$! 
>$SERVICE_DIR/$SCREEN_NAME/$1.pid; fg || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" else # Spawn directly without screen - run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$service.pid + run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid + fi + fi +} + + +# Stop a service in screen +# screen_stop service +function screen_stop() { + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + USE_SCREEN=$(trueorfalse True $USE_SCREEN) + + if is_service_enabled $1; then + # Kill via pid if we have one available + if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then + pkill -TERM -P $(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid) + rm $SERVICE_DIR/$SCREEN_NAME/$1.pid + fi + if [[ "$USE_SCREEN" = "True" ]]; then + # Clean up the screen window + screen -S $SCREEN_NAME -p $1 -X kill fi fi } diff --git a/lib/ceilometer b/lib/ceilometer index fac3be14a9..211303f57c 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -162,7 +162,7 @@ function start_ceilometer() { function stop_ceilometer() { # Kill the ceilometer screen windows for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do - screen -S $SCREEN_NAME -p $serv -X kill + screen_stop $serv done } diff --git a/lib/cinder b/lib/cinder index cbe732e9b0..11414bedd3 100644 --- a/lib/cinder +++ b/lib/cinder @@ -556,7 +556,7 @@ function start_cinder() { function stop_cinder() { # Kill the cinder screen windows for serv in c-api c-bak c-sch c-vol; do - screen -S $SCREEN_NAME -p $serv -X kill + screen_stop $serv done if is_service_enabled c-vol; then diff --git a/lib/glance b/lib/glance index 135136db7e..80868ae5c5 100644 --- a/lib/glance +++ b/lib/glance @@ -206,8 +206,8 @@ function start_glance() { # stop_glance() - Stop running processes function stop_glance() { # Kill the Glance screen windows - screen -S $SCREEN_NAME -p g-api -X kill - screen -S $SCREEN_NAME -p 
g-reg -X kill + screen_stop g-api + screen_stop g-reg } diff --git a/lib/heat b/lib/heat index e44a618162..29cd967fe1 100644 --- a/lib/heat +++ b/lib/heat @@ -175,7 +175,7 @@ function start_heat() { function stop_heat() { # Kill the screen windows for serv in h-eng h-api h-api-cfn h-api-cw; do - screen -S $SCREEN_NAME -p $serv -X kill + screen_stop $serv done } diff --git a/lib/keystone b/lib/keystone index 29b9604efe..dc6a730f16 100644 --- a/lib/keystone +++ b/lib/keystone @@ -421,7 +421,7 @@ function start_keystone() { # stop_keystone() - Stop running processes function stop_keystone() { # Kill the Keystone screen window - screen -S $SCREEN_NAME -p key -X kill + screen_stop key } diff --git a/lib/nova b/lib/nova index 39685a835a..178f8ee19c 100644 --- a/lib/nova +++ b/lib/nova @@ -705,7 +705,7 @@ function stop_nova() { # Some services are listed here twice since more than one instance # of a service may be running in certain configs. for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta; do - screen -S $SCREEN_NAME -p $serv -X kill + screen_stop $serv done if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then stop_nova_hypervisor diff --git a/lib/trove b/lib/trove index f8e3eddfe2..870afbe7bd 100644 --- a/lib/trove +++ b/lib/trove @@ -198,7 +198,7 @@ function start_trove() { function stop_trove() { # Kill the trove screen windows for serv in tr-api tr-tmgr tr-cond; do - screen -S $SCREEN_NAME -p $serv -X kill + screen_stop $serv done } diff --git a/stackrc b/stackrc index 3fdc566ed2..49fb26b2c7 100644 --- a/stackrc +++ b/stackrc @@ -9,6 +9,9 @@ DEST=/opt/stack # Destination for working data DATA_DIR=${DEST}/data +# Destination for status files +SERVICE_DIR=${DEST}/status + # Determine stack user if [[ $EUID -eq 0 ]]; then STACK_USER=stack diff --git a/unstack.sh b/unstack.sh index 67c8b7c7b1..77dbe074d2 100755 --- a/unstack.sh +++ b/unstack.sh @@ -36,6 +36,9 @@ source 
$TOP_DIR/lib/apache # Get project function libraries source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/cinder +source $TOP_DIR/lib/keystone +source $TOP_DIR/lib/glance +source $TOP_DIR/lib/nova source $TOP_DIR/lib/horizon source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron @@ -75,21 +78,29 @@ if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then teardown_neutron_debug fi -# Shut down devstack's screen to get the bulk of OpenStack services in one shot -SCREEN=$(which screen) -if [[ -n "$SCREEN" ]]; then - SESSION=$(screen -ls | awk '/[0-9].stack/ { print $1 }') - if [[ -n "$SESSION" ]]; then - screen -X -S $SESSION quit - fi +# Call service stop +if is_service_enabled trove; then + stop_trove +fi + +if is_service_enabled heat; then + stop_heat fi -# Shut down Nova hypervisor plugins after Nova -NOVA_PLUGINS=$TOP_DIR/lib/nova_plugins -if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then - # Load plugin - source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER - stop_nova_hypervisor +if is_service_enabled ceilometer; then + stop_ceilometer +fi + +if is_service_enabled nova; then + stop_nova +fi + +if is_service_enabled g-api g-reg; then + stop_glance +fi + +if is_service_enabled key; then + stop_keystone fi # Swift runs daemons @@ -123,6 +134,7 @@ SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* # Get the iSCSI volumes if is_service_enabled cinder; then + stop_cinder cleanup_cinder fi @@ -152,4 +164,13 @@ if is_service_enabled trove; then cleanup_trove fi +# Clean up the remainder of the screen processes +SCREEN=$(which screen) +if [[ -n "$SCREEN" ]]; then + SESSION=$(screen -ls | awk '/[0-9].stack/ { print $1 }') + if [[ -n "$SESSION" ]]; then + screen -X -S $SESSION quit + fi +fi + cleanup_tmp From 2bb483d32ec0876f071550a3fc755436d1661681 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 3 Jan 2014 09:41:27 -0500 Subject: [PATCH 0030/4119] clean up ubuntu versions oneiric is long dead, remove references to it whenever possible (one more subtle issue 
in cinder should be a separate patch). This includes removing the oneiric only tool build_uec.sh. Also remove the bulk of references to quantal, which is 8 months out of support. note: raring only has support for the rest of the month. Change-Id: Ib17502be7572af76dc95560615221b48b970a547 --- files/apts/cinder | 2 +- files/apts/glance | 1 - files/apts/n-cpu | 2 +- files/apts/neutron | 2 +- files/apts/tls-proxy | 2 +- lib/rpc_backend | 5 +- stack.sh | 3 +- tools/build_uec.sh | 302 ----------------------------------------- tools/get_uec_image.sh | 6 +- 9 files changed, 9 insertions(+), 316 deletions(-) delete mode 100755 tools/build_uec.sh diff --git a/files/apts/cinder b/files/apts/cinder index f8e3b6d06d..712fee99ec 100644 --- a/files/apts/cinder +++ b/files/apts/cinder @@ -4,4 +4,4 @@ qemu-utils libpq-dev python-dev open-iscsi -open-iscsi-utils # Deprecated since quantal dist:lucid,oneiric,precise +open-iscsi-utils # Deprecated since quantal dist:precise diff --git a/files/apts/glance b/files/apts/glance index 26826a53c7..22787bc5a2 100644 --- a/files/apts/glance +++ b/files/apts/glance @@ -9,7 +9,6 @@ python-dev python-eventlet python-routes python-greenlet -python-argparse # dist:oneiric python-sqlalchemy python-wsgiref python-pastedeploy diff --git a/files/apts/n-cpu b/files/apts/n-cpu index 88e0144079..29e37603b7 100644 --- a/files/apts/n-cpu +++ b/files/apts/n-cpu @@ -2,7 +2,7 @@ nbd-client lvm2 open-iscsi -open-iscsi-utils # Deprecated since quantal dist:lucid,oneiric,precise +open-iscsi-utils # Deprecated since quantal dist:precise genisoimage sysfsutils sg3-utils diff --git a/files/apts/neutron b/files/apts/neutron index 0f4b69f8ef..5760113c8c 100644 --- a/files/apts/neutron +++ b/files/apts/neutron @@ -18,7 +18,7 @@ python-mysqldb python-pyudev python-qpid # dist:precise dnsmasq-base -dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise,quantal +dnsmasq-utils # for dhcp_release only available in dist:precise rabbitmq-server # NOPRIME
qpid # NOPRIME sqlite3 diff --git a/files/apts/tls-proxy b/files/apts/tls-proxy index 0a44015925..8fca42d124 100644 --- a/files/apts/tls-proxy +++ b/files/apts/tls-proxy @@ -1 +1 @@ -stud # only available in dist:precise,quantal +stud # only available in dist:precise diff --git a/lib/rpc_backend b/lib/rpc_backend index ae83e85e89..f59c80096f 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -192,9 +192,8 @@ function qpid_is_supported() { GetDistro fi - # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is - # not in openSUSE either right now. - ( ! ([[ "$DISTRO" = "oneiric" ]] || is_suse) ) + # Qpid is not in openSUSE + ( ! is_suse ) } diff --git a/stack.sh b/stack.sh index 7c065719c4..c303dc3927 100755 --- a/stack.sh +++ b/stack.sh @@ -131,7 +131,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f18|f19|f20|opensuse-12.2|rhel6) ]]; then +if [[ ! ${DISTRO} =~ (precise|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f18|f19|f20|opensuse-12.2|rhel6) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" @@ -1203,7 +1203,6 @@ fi # See https://help.ubuntu.com/community/CloudInit for more on cloud-init # # Override ``IMAGE_URLS`` with a comma-separated list of UEC images. 
-# * **oneiric**: http://uec-images.ubuntu.com/oneiric/current/oneiric-server-cloudimg-amd64.tar.gz # * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz if is_service_enabled g-reg; then diff --git a/tools/build_uec.sh b/tools/build_uec.sh deleted file mode 100755 index bce051a0b7..0000000000 --- a/tools/build_uec.sh +++ /dev/null @@ -1,302 +0,0 @@ -#!/usr/bin/env bash - -# **build_uec.sh** - -# Make sure that we have the proper version of ubuntu (only works on oneiric) -if ! egrep -q "oneiric" /etc/lsb-release; then - echo "This script only works with ubuntu oneiric." - exit 1 -fi - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $TOOLS_DIR/..; pwd) - -# Import common functions -. $TOP_DIR/functions - -cd $TOP_DIR - -# Source params -source ./stackrc - -# Ubuntu distro to install -DIST_NAME=${DIST_NAME:-oneiric} - -# Configure how large the VM should be -GUEST_SIZE=${GUEST_SIZE:-10G} - -# exit on error to stop unexpected errors -set -o errexit -set -o xtrace - -# Abort if localrc is not set -if [ ! -e $TOP_DIR/localrc ]; then - echo "You must have a localrc with ALL necessary passwords defined before proceeding." - echo "See stack.sh for required passwords." - exit 1 -fi - -# Install deps if needed -DEPS="kvm libvirt-bin kpartx cloud-utils curl" -apt_get install -y --force-yes $DEPS || true # allow this to fail gracefully for concurrent builds - -# Where to store files and instances -WORK_DIR=${WORK_DIR:-/opt/uecstack} - -# Where to store images -image_dir=$WORK_DIR/images/$DIST_NAME -mkdir -p $image_dir - -# Start over with a clean base image, if desired -if [ $CLEAN_BASE ]; then - rm -f $image_dir/disk -fi - -# Get the base image if it does not yet exist -if [ ! -e $image_dir/disk ]; then - $TOOLS_DIR/get_uec_image.sh -r $GUEST_SIZE $DIST_NAME $image_dir/disk $image_dir/kernel -fi - -# Copy over dev environment if COPY_ENV is set. 
-# This will also copy over your current devstack. -if [ $COPY_ENV ]; then - cd $TOOLS_DIR - ./copy_dev_environment_to_uec.sh $image_dir/disk -fi - -# Option to warm the base image with software requirements. -if [ $WARM_CACHE ]; then - cd $TOOLS_DIR - ./warm_apts_for_uec.sh $image_dir/disk -fi - -# Name of our instance, used by libvirt -GUEST_NAME=${GUEST_NAME:-devstack} - -# Mop up after previous runs -virsh destroy $GUEST_NAME || true - -# Where this vm is stored -vm_dir=$WORK_DIR/instances/$GUEST_NAME - -# Create vm dir and remove old disk -mkdir -p $vm_dir -rm -f $vm_dir/disk - -# Create a copy of the base image -qemu-img create -f qcow2 -b $image_dir/disk $vm_dir/disk - -# Back to devstack -cd $TOP_DIR - -GUEST_NETWORK=${GUEST_NETWORK:-1} -GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes} -GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50} -GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24} -GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0} -GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1} -GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"} -GUEST_RAM=${GUEST_RAM:-1524288} -GUEST_CORES=${GUEST_CORES:-1} - -# libvirt.xml configuration -NET_XML=$vm_dir/net.xml -NET_NAME=${NET_NAME:-devstack-$GUEST_NETWORK} -cat > $NET_XML < - $NET_NAME - - - - - - - - -EOF - -if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then - virsh net-destroy $NET_NAME || true - # destroying the network isn't enough to delete the leases - rm -f /var/lib/libvirt/dnsmasq/$NET_NAME.leases - virsh net-create $vm_dir/net.xml -fi - -# libvirt.xml configuration -LIBVIRT_XML=$vm_dir/libvirt.xml -cat > $LIBVIRT_XML < - $GUEST_NAME - $GUEST_RAM - - hvm - $image_dir/kernel - root=/dev/vda ro console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud-net;s=http://192.168.$GUEST_NETWORK.1:4567/ ubuntu-pass=ubuntu - - - - - - $GUEST_CORES - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -EOF - - -rm -rf $vm_dir/uec -cp -r $TOOLS_DIR/uec $vm_dir/uec - -# set metadata -cat > 
$vm_dir/uec/meta-data< $vm_dir/uec/user-data<> $vm_dir/uec/user-data< localrc < /opt/stack/.ssh/authorized_keys -chown -R $STACK_USER /opt/stack -chmod 700 /opt/stack/.ssh -chmod 600 /opt/stack/.ssh/authorized_keys - -grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || - echo "#includedir /etc/sudoers.d" >> /etc/sudoers -( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \ - > /etc/sudoers.d/50_stack_sh ) -EOF -fi - -# Run stack.sh -cat >> $vm_dir/uec/user-data< Date: Sun, 12 Jan 2014 19:35:43 +0000 Subject: [PATCH 0031/4119] Skip Nova exercises if Nova is not enabled This allows for ./exercises.sh to complete sucessfully when nova is not enabled / installed. Change-Id: If969e14f5106c15007146e8fad1da27d131828c8 --- exercises/aggregates.sh | 4 ++++ exercises/bundle.sh | 4 ++++ exercises/euca.sh | 4 ++++ exercises/floating_ips.sh | 4 ++++ exercises/sec_groups.sh | 4 ++++ 5 files changed, 20 insertions(+) diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index 1b1ac06678..d223301f35 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -43,6 +43,10 @@ source $TOP_DIR/exerciserc # Test as the admin user . $TOP_DIR/openrc admin admin +# If nova api is not enabled we exit with exitcode 55 so that +# the exercise is skipped +is_service_enabled n-api || exit 55 + # Cells does not support aggregates. 
is_service_enabled n-cell && exit 55 diff --git a/exercises/bundle.sh b/exercises/bundle.sh index b83678ab1f..5470960b91 100755 --- a/exercises/bundle.sh +++ b/exercises/bundle.sh @@ -39,6 +39,10 @@ rm -f $TOP_DIR/cacert.pem rm -f $TOP_DIR/cert.pem rm -f $TOP_DIR/pk.pem +# If nova api is not enabled we exit with exitcode 55 so that +# the exercise is skipped +is_service_enabled n-api || exit 55 + # Get Certificates nova x509-get-root-cert $TOP_DIR/cacert.pem nova x509-create-cert $TOP_DIR/pk.pem $TOP_DIR/cert.pem diff --git a/exercises/euca.sh b/exercises/euca.sh index ed521e4f7f..51b2644458 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -41,6 +41,10 @@ fi # Import exercise configuration source $TOP_DIR/exerciserc +# If nova api is not enabled we exit with exitcode 55 so that +# the exercise is skipped +is_service_enabled n-api || exit 55 + # Skip if the hypervisor is Docker [[ "$VIRT_DRIVER" == "docker" ]] && exit 55 diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 7055278f35..4ca90a5c35 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -38,6 +38,10 @@ fi # Import exercise configuration source $TOP_DIR/exerciserc +# If nova api is not enabled we exit with exitcode 55 so that +# the exercise is skipped +is_service_enabled n-api || exit 55 + # Skip if the hypervisor is Docker [[ "$VIRT_DRIVER" == "docker" ]] && exit 55 diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh index eb32cc7aa7..d71a1e0755 100755 --- a/exercises/sec_groups.sh +++ b/exercises/sec_groups.sh @@ -33,6 +33,10 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc +# If nova api is not enabled we exit with exitcode 55 so that +# the exercise is skipped +is_service_enabled n-api || exit 55 + # Skip if the hypervisor is Docker [[ "$VIRT_DRIVER" == "docker" ]] && exit 55 From 38d1f2339a88c389e4be44fc00e59f25a62fec14 Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Wed, 8 Jan 2014 09:54:13 -0500 
Subject: [PATCH 0032/4119] Add Marconi to Tempest config This patch adds queuing to tempest config, provided queuing is available in devstack. Change-Id: I2925a07d312c1f8ab2fe465f74f0bef9299eef40 Implements: blueprint add-basic-marconi-tests --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 08c0553f03..ef9dfe218b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -329,7 +329,7 @@ function configure_tempest() { iniset $TEMPEST_CONF network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}" # service_available - for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna trove; do + for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna trove marconi; do if is_service_enabled $service ; then iniset $TEMPEST_CONF service_available $service "True" else From d2bcbea5f95377043b0dcdba330501d7b81a4561 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 13 Jan 2014 11:22:41 -0600 Subject: [PATCH 0033/4119] Updates for tools/info.sh * Handle local.conf localrc section * remove blank lines * rather than removing password lines, just remove the password itself to at least show which password vars have been set Change-Id: Ieca9baaf03e53b23e336944ad0ed2581c9bee460 --- tools/info.sh | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/tools/info.sh b/tools/info.sh index 14ab8f6306..3ab7966ab4 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -85,8 +85,8 @@ done # Packages # -------- -# - We are going to check packages only for the services needed. -# - We are parsing the packages files and detecting metadatas. 
+# - Only check packages for the services enabled +# - Parse version info from the package metadata, not the package/file names for p in $(get_packages $ENABLED_SERVICES); do if [[ "$os_PACKAGE" = "deb" ]]; then @@ -141,9 +141,15 @@ rm $FREEZE_FILE # Dump localrc with 'localrc|' prepended and comments and passwords left out if [[ -r $TOP_DIR/localrc ]]; then + RC=$TOP_DIR/localrc +elif [[ -f $RC_DIR/.localrc.auto ]]; then + RC=$TOP_DIR/.localrc.auto +fi +if [[ -n $RC ]]; then sed -e ' - /PASSWORD/d; + /^[ \t]*$/d; + /PASSWORD/s/=.*$/=\/; /^#/d; s/^/localrc\|/; - ' $TOP_DIR/localrc + ' $RC fi From 279295c72c4e7028fc6eac75412b9b5f92cd630b Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Tue, 14 Jan 2014 11:37:51 +0000 Subject: [PATCH 0034/4119] Fix duplicated rootwrap.d in lib/ironic The Ironic setup of devstack is duplicating the rootwrap.d directory at /etc/ironic/rootwrap.d/rootwrap.d, this will cause the ironic-rootwrap command to fail to execute. This patch is removing the duplicated rootwrap.d directory. Change-Id: I24844c24620b5b33ad1a6acd0d872e9df11d6d89 Closes-Bug: #1268930 --- lib/ironic | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/ironic b/lib/ironic index 1ff3c81f06..afbc3e09e4 100644 --- a/lib/ironic +++ b/lib/ironic @@ -33,7 +33,6 @@ IRONIC_AUTH_CACHE_DIR=${IRONIC_AUTH_CACHE_DIR:-/var/cache/ironic} IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic} IRONIC_CONF_FILE=$IRONIC_CONF_DIR/ironic.conf IRONIC_ROOTWRAP_CONF=$IRONIC_CONF_DIR/rootwrap.conf -IRONIC_ROOTWRAP_FILTERS=$IRONIC_CONF_DIR/rootwrap.d IRONIC_POLICY_JSON=$IRONIC_CONF_DIR/policy.json # Support entry points installation of console scripts @@ -118,7 +117,7 @@ function configure_ironic_api() { # Sets conductor specific settings. 
function configure_ironic_conductor() { cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF - cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_ROOTWRAP_FILTERS + cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR iniset $IRONIC_CONF DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF } From ef1e08022b9553b07757005e7a5103fbdc0d99f0 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Thu, 2 Jan 2014 16:33:53 -0800 Subject: [PATCH 0035/4119] Add sanity check framework to verify neutron server/backend integration Some Neutron plugins require controllers and multiple backend services to operate correctly. This patch adds the framework for third party plugins to run sanity checks after Neutron Server has started. This simple addition may reveal potential configuration pitfalls much earlier in the dev/test cycle, thus speeding up the build churn process. The first plugin that uses this framework is the VMware NSX one. Closes-bug: #1265671 Change-Id: I17f9c5c8e828316ff03f0eff42ae4ae6c6c58733 --- lib/neutron | 5 +++++ lib/neutron_thirdparty/README.md | 3 +++ lib/neutron_thirdparty/bigswitch_floodlight | 4 ++++ lib/neutron_thirdparty/midonet | 4 ++++ lib/neutron_thirdparty/ryu | 4 ++++ lib/neutron_thirdparty/trema | 4 ++++ lib/neutron_thirdparty/vmware_nsx | 4 ++++ stack.sh | 1 + 8 files changed, 29 insertions(+) diff --git a/lib/neutron b/lib/neutron index 43f43f951a..81faa103b5 100644 --- a/lib/neutron +++ b/lib/neutron @@ -958,6 +958,11 @@ function stop_neutron_third_party() { _neutron_third_party_do stop } +# check_neutron_third_party_integration() - Check that third party integration is sane +function check_neutron_third_party_integration() { + _neutron_third_party_do check +} + # Restore xtrace $XTRACE diff --git a/lib/neutron_thirdparty/README.md b/lib/neutron_thirdparty/README.md index b289f58c5d..2460e5cac7 100644 --- a/lib/neutron_thirdparty/README.md +++ b/lib/neutron_thirdparty/README.md @@ -34,3 +34,6 @@ functions to be implemented * ``stop_``: stop 
running processes (non-screen) + +* ``check_``: + verify that the integration between neutron server and third-party components is sane diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight index ebde0673b8..1fd4fd801a 100644 --- a/lib/neutron_thirdparty/bigswitch_floodlight +++ b/lib/neutron_thirdparty/bigswitch_floodlight @@ -45,5 +45,9 @@ function stop_bigswitch_floodlight() { : } +function check_bigswitch_floodlight() { + : +} + # Restore xtrace $MY_XTRACE diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet index 7928bca31f..e672528a2d 100644 --- a/lib/neutron_thirdparty/midonet +++ b/lib/neutron_thirdparty/midonet @@ -56,5 +56,9 @@ function stop_midonet() { : } +function check_midonet() { + : +} + # Restore xtrace $MY_XTRACE diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu index 3b825a10c1..5edf273361 100644 --- a/lib/neutron_thirdparty/ryu +++ b/lib/neutron_thirdparty/ryu @@ -75,5 +75,9 @@ function stop_ryu() { : } +function check_ryu() { + : +} + # Restore xtrace $MY_XTRACE diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema index bdc23568fb..2b125646dc 100644 --- a/lib/neutron_thirdparty/trema +++ b/lib/neutron_thirdparty/trema @@ -109,5 +109,9 @@ function stop_trema() { sudo TREMA_TMP=$TREMA_TMP_DIR trema killall } +function check_trema() { + : +} + # Restore xtrace $MY_XTRACE diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx index 70d348274f..7c6202723f 100644 --- a/lib/neutron_thirdparty/vmware_nsx +++ b/lib/neutron_thirdparty/vmware_nsx @@ -78,5 +78,9 @@ function stop_vmware_nsx() { done } +function check_vmware_nsx() { + : +} + # Restore xtrace $MY_XTRACE diff --git a/stack.sh b/stack.sh index 7da41a98c8..621a058444 100755 --- a/stack.sh +++ b/stack.sh @@ -1116,6 +1116,7 @@ fi if is_service_enabled q-svc; then echo_summary "Starting Neutron" start_neutron_service_and_check + 
check_neutron_third_party_integration elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then NM_CONF=${NOVA_CONF} if is_service_enabled n-cell; then From 5eec5b6b80401842ad1f7275d9c7a6949cc6f848 Mon Sep 17 00:00:00 2001 From: Gordon Chung Date: Tue, 14 Jan 2014 11:05:31 -0500 Subject: [PATCH 0036/4119] command not found errors on unstack - add lib/ceilometer and lib/heat to source list for when stop_heat and stop_ceilometer functions are called. - add lib/tls source to lib/keystone for when is_ssl_enabled_service function called. Change-Id: Ief05766e9cfda71fb6392c8a757d04751283414e Closes-Bug: #1269047 --- lib/keystone | 1 + unstack.sh | 2 ++ 2 files changed, 3 insertions(+) diff --git a/lib/keystone b/lib/keystone index a7e5d66808..ceefe6a144 100644 --- a/lib/keystone +++ b/lib/keystone @@ -28,6 +28,7 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace +source $TOP_DIR/lib/tls # Defaults # -------- diff --git a/unstack.sh b/unstack.sh index 77dbe074d2..4445f1fb31 100755 --- a/unstack.sh +++ b/unstack.sh @@ -35,10 +35,12 @@ source $TOP_DIR/lib/apache # Get project function libraries source $TOP_DIR/lib/baremetal +source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/cinder source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova +source $TOP_DIR/lib/heat source $TOP_DIR/lib/horizon source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron From 52a7b6ecbad11c08dcd77a6fcd8bfef6a20324a9 Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Tue, 14 Jan 2014 18:52:51 +0100 Subject: [PATCH 0037/4119] Run neutron-debug with admin tenant in neutron-adv-test Because neutron-debug create-probe needs admin role only, demo tenants cannot create ports. neutron-debug is wrapped in order to run it only with admin tenant. 
Change-Id: Ib65e8639858c597345c6a5fdc0192b40f34a0300 Closes-Bug: #1269090 --- exercises/neutron-adv-test.sh | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index 0c0d42f458..1343f11553 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -185,6 +185,14 @@ function confirm_server_active { fi } +function neutron_debug_admin { + local os_username=$OS_USERNAME + local os_tenant_id=$OS_TENANT_ID + source $TOP_DIR/openrc admin admin + neutron-debug $@ + source $TOP_DIR/openrc $os_username $os_tenant_id +} + function add_tenant { local TENANT=$1 local USER=$2 @@ -241,7 +249,7 @@ function create_network { local NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA" neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR - neutron-debug probe-create --device-owner compute $NET_ID + neutron_debug_admin probe-create --device-owner compute $NET_ID source $TOP_DIR/openrc demo demo } From 55d9b9a9517ebe8c37f82136ff5eb7b781929325 Mon Sep 17 00:00:00 2001 From: Shiv Haris Date: Tue, 14 Jan 2014 11:33:28 -0800 Subject: [PATCH 0038/4119] Fix typo NEUTON to NEUTRON Fixes bug: #1269111 Change-Id: Icf66b4d474698b5f3ca22bc656ecd12d03164bce --- lib/neutron_plugins/brocade | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade index f9275cacc2..8e18d04984 100644 --- a/lib/neutron_plugins/brocade +++ b/lib/neutron_plugins/brocade @@ -38,7 +38,7 @@ function neutron_plugin_configure_l3_agent() { } function neutron_plugin_configure_plugin_agent() { - AGENT_BINARY="$NEUTON_BIN_DIR/neutron-linuxbridge-agent" + AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent" } function neutron_plugin_setup_interface_driver() { From 
b4a215cce2c649ce811893f5e57b7ee6c55158e8 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Fri, 10 Jan 2014 16:39:32 +0900 Subject: [PATCH 0039/4119] Sanitize language settings To avoid commands bailing out with "unsupported locale setting" errors. Change-Id: I54ae4cd84a0a4b4875533181b1d96563a1604775 --- stack.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/stack.sh b/stack.sh index 7da41a98c8..c52514413c 100755 --- a/stack.sh +++ b/stack.sh @@ -23,6 +23,13 @@ # Make sure custom grep options don't get in the way unset GREP_OPTIONS +# Sanitize language settings to avoid commands bailing out +# with "unsupported locale setting" errors. +unset LANG +unset LANGUAGE +LC_ALL=C +export LC_ALL + # Keep track of the devstack directory TOP_DIR=$(cd $(dirname "$0") && pwd) From d5a5460888869eb22cc6f2622c3adbf492680971 Mon Sep 17 00:00:00 2001 From: Steven Dake Date: Wed, 15 Jan 2014 10:56:51 -0700 Subject: [PATCH 0040/4119] Revert "Change the libvirtd log level to DEBUG" Suggested by Daniel Berrange in this thread: http://lists.openstack.org/pipermail/openstack-dev/2014-January/024407.html This reverts commit 3bd85c9d6e257fc952cb3c6d0c09e199685bd5ed. Change-Id: I370ba61cf8a00b51684cd504fed4ba4078d868be --- lib/nova_plugins/hypervisor-libvirt | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index ef40e7ab4c..6f90f4ac17 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -93,9 +93,6 @@ EOF" fi fi - # Change the libvirtd log level to DEBUG. - sudo sed -i s/"#log_level = 3"/"log_level = 1"/ /etc/libvirt/libvirtd.conf - # The user that nova runs as needs to be member of **libvirtd** group otherwise # nova-compute will be unable to use libvirt. if ! 
getent group $LIBVIRT_GROUP >/dev/null; then From 2394605a635c86c9a90f683f1f3a3ee718d17d5f Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 15 Jan 2014 21:42:32 +0000 Subject: [PATCH 0041/4119] Typo: funstions=>functions Change-Id: I59caf62b049d09450ce3236648cf1ede2f48e7f5 --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 43f43f951a..5dc5703f3c 100644 --- a/lib/neutron +++ b/lib/neutron @@ -1,5 +1,5 @@ # lib/neutron -# functions - funstions specific to neutron +# functions - functions specific to neutron # Dependencies: # ``functions`` file From 14daa57d67fed6dc98b833f4c3698fef8ff7f312 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 15 Jan 2014 21:43:25 +0000 Subject: [PATCH 0042/4119] Remove old DEFAULT.root_helper setting root_helper is now under the agent group and not DEFAULT Change-Id: I11867f7ceff1f3b8b0bc2ef8aa508b6ecee653fc --- lib/neutron | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/neutron b/lib/neutron index 43f43f951a..fd61d140d3 100644 --- a/lib/neutron +++ b/lib/neutron @@ -611,9 +611,6 @@ function _configure_neutron_debug_command() { iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT verbose False iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT debug False iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND" - # Intermediate fix until Neutron patch lands and then line above will - # be cleaned. iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper "$Q_RR_COMMAND" _neutron_setup_interface_driver $NEUTRON_TEST_CONFIG_FILE From fe4c4f7a9e6d1a4f26c67b8e1609fc5e80c5ef83 Mon Sep 17 00:00:00 2001 From: john-griffith Date: Wed, 15 Jan 2014 11:24:03 -0700 Subject: [PATCH 0043/4119] Update cinder cert script to use run_tempest Changes to tempest run_tests.sh (commit: 17520e49a7e69b3817856a739121a1fb2906f2cc) breaks the cinder_driver_cert script. 
A backward compatible run_tempest.sh script was added, so for now we should update the cinder_driver_cert script to use that Change-Id: I611a01dd4788ae01da8a6167a530f9e44733dfc6 Closes-Bug: #1269531 --- driver_certs/cinder_driver_cert.sh | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh index 18bef8b3b5..edcc6d4800 100755 --- a/driver_certs/cinder_driver_cert.sh +++ b/driver_certs/cinder_driver_cert.sh @@ -2,6 +2,22 @@ # **cinder_cert.sh** +# This script is a simple wrapper around the tempest volume api tests +# It requires that you have a working and functional devstack install +# and that you've enabled your device driver by making the necessary +# modifications to /etc/cinder/cinder.conf + +# This script will refresh your openstack repo's and restart the cinder +# services to pick up your driver changes. +# please NOTE; this script assumes your devstack install is functional +# and includes tempest. A good first step is to make sure you can +# create volumes on your device before you even try and run this script. + +# It also assumes default install location (/opt/stack/xxx) +# to aid in debug, you should also verify that you've added +# an output directory for screen logs: +# SCREEN_LOGDIR=/opt/stack/screen-logs + CERT_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=$(cd $CERT_DIR/..; pwd) @@ -73,9 +89,9 @@ start_cinder sleep 5 # run tempest api/volume/test_* -log_message "Run the actual tempest volume tests (run_tests.sh -N tempest.api.volume.test_*)...", True +log_message "Run the actual tempest volume tests (./tools/pretty_tox.sh api.volume_*)...", True exec 2> >(tee -a $TEMPFILE) -`./run_tests.sh -N tempest.api.volume.test_*` +`./tools/pretty_tox.sh api.volume` if [[ $? = 0 ]]; then log_message "CONGRATULATIONS!!! 
Device driver PASSED!", True log_message "Submit output: ($TEMPFILE)" From a0a23311c3c40f631663468e1ba45d5e84790019 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 15 Jan 2014 15:24:30 -0500 Subject: [PATCH 0044/4119] updated sar options to collect more data in order to have better data on the load state of the test nodes we should track things beyond just cpu time. Add in load time, process creation rates, and io rates during the tests. also add a sar filter that makes it report on one line reading sar input with multiple flags is somewhat problematic, because it's tons of interspersed headers. So build something with does a pivot filter to make it possible to get this all on one line. Change-Id: I8f085cedda65dfc37ad530eb97ba1fc5577314c3 --- stack.sh | 12 +++++-- tools/sar_filter.py | 82 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+), 3 deletions(-) create mode 100755 tools/sar_filter.py diff --git a/stack.sh b/stack.sh index 7da41a98c8..382b75e7fc 100755 --- a/stack.sh +++ b/stack.sh @@ -860,11 +860,17 @@ init_service_check # ------- # If enabled, systat has to start early to track OpenStack service startup. 
-if is_service_enabled sysstat;then +if is_service_enabled sysstat; then + # what we want to measure + # -u : cpu statitics + # -q : load + # -b : io load rates + # -w : process creation and context switch rates + SYSSTAT_OPTS="-u -q -b -w" if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it sysstat "cd ; sar -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" + screen_it sysstat "cd $TOP_DIR; ./tools/sar_filter.py $SYSSTAT_OPTS -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" else - screen_it sysstat "sar $SYSSTAT_INTERVAL" + screen_it sysstat "./tools/sar_filter.py $SYSSTAT_OPTS $SYSSTAT_INTERVAL" fi fi diff --git a/tools/sar_filter.py b/tools/sar_filter.py new file mode 100755 index 0000000000..ed8c19687c --- /dev/null +++ b/tools/sar_filter.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python +# +# Copyright 2014 Samsung Electronics Corp. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import re +import subprocess +import sys + + +def is_data_line(line): + timestamp, data = parse_line(line) + return re.search('\d\.d', data) + + +def parse_line(line): + m = re.search('(\d\d:\d\d:\d\d \w\w)(\s+((\S+)\s*)+)', line) + if m: + date = m.group(1) + data = m.group(2).rstrip() + return date, data + else: + return None, None + + +process = subprocess.Popen( + "sar %s" % " ".join(sys.argv[1:]), + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + +# Poll process for new output until finished + +start_time = "" +header = "" +data_line = "" +printed_header = False +current_ts = None +while True: + nextline = process.stdout.readline() + if nextline == '' and process.poll() is not None: + break + + date, data = parse_line(nextline) + # stop until we get to the first set of real lines + if not date: + continue + + # now we eat the header lines, and only print out the header + # if we've never seen them before + if not start_time: + start_time = date + header += "%s %s" % (date, data) + elif date == start_time: + header += " %s" % data + elif not printed_header: + printed_header = True + print header + + # now we know this is a data line, printing out if the timestamp + # has changed, and stacking up otherwise. + nextline = process.stdout.readline() + date, data = parse_line(nextline) + if date != current_ts: + current_ts = date + print data_line + data_line = "%s %s" % (date, data) + else: + data_line += " %s" % data + + sys.stdout.flush() From 0049c0c434b4672963b6622486c6c638259bdfda Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Thu, 16 Jan 2014 18:16:48 -0600 Subject: [PATCH 0045/4119] Make unstack.sh more like stack.sh unstack.sh and stack.sh both have to "configure projects", but the code was different. This change makes it so the 2 sections of the files are the same. 
Change-Id: Ia06f8bbfbe2a6e87fb406e34e13a39bd7fa9e5af --- lib/keystone | 2 -- stack.sh | 6 +++++- unstack.sh | 23 +++++++++++++++++------ 3 files changed, 22 insertions(+), 9 deletions(-) diff --git a/lib/keystone b/lib/keystone index ceefe6a144..71ac668ce5 100644 --- a/lib/keystone +++ b/lib/keystone @@ -28,8 +28,6 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace -source $TOP_DIR/lib/tls - # Defaults # -------- diff --git a/stack.sh b/stack.sh index 7da41a98c8..50a4cd2af9 100755 --- a/stack.sh +++ b/stack.sh @@ -305,9 +305,13 @@ rm -f $SSL_BUNDLE_FILE # Configure Projects # ================== -# Source project function libraries +# Import apache functions source $TOP_DIR/lib/apache + +# Import TLS functions source $TOP_DIR/lib/tls + +# Source project function libraries source $TOP_DIR/lib/infra source $TOP_DIR/lib/oslo source $TOP_DIR/lib/stackforge diff --git a/unstack.sh b/unstack.sh index 4445f1fb31..31f6f01c8f 100755 --- a/unstack.sh +++ b/unstack.sh @@ -30,20 +30,31 @@ if [[ $EUID -eq 0 ]]; then exit 1 fi + +# Configure Projects +# ================== + # Import apache functions source $TOP_DIR/lib/apache -# Get project function libraries -source $TOP_DIR/lib/baremetal -source $TOP_DIR/lib/ceilometer -source $TOP_DIR/lib/cinder +# Import TLS functions +source $TOP_DIR/lib/tls + +# Source project function libraries +source $TOP_DIR/lib/infra +source $TOP_DIR/lib/oslo +source $TOP_DIR/lib/stackforge +source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova -source $TOP_DIR/lib/heat -source $TOP_DIR/lib/horizon +source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift +source $TOP_DIR/lib/ceilometer +source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron +source $TOP_DIR/lib/baremetal +source $TOP_DIR/lib/ldap source $TOP_DIR/lib/ironic source $TOP_DIR/lib/trove From 04f6dc24a7845ee139977fa5b0c5e53aad8e99bd Mon Sep 17 00:00:00 2001 From: Emilien Macchi Date: Thu, 16 Jan 2014 18:03:38 -0500 Subject: [PATCH 0046/4119] Fix 
stop_neutron metadata agent function Currently, stop_neutron fails in Jenkins because it kills itself. This patch ensure we kill only neutron metadata agent, and not the awk process in itself. Change-Id: I25d1d90e002fa9eb3c5bc366cc74cb70a2daa69f Closes-bug: #1269982 Signed-off-by: Emilien Macchi --- lib/neutron | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index 43f43f951a..a909b8b81c 100644 --- a/lib/neutron +++ b/lib/neutron @@ -505,8 +505,7 @@ function stop_neutron() { [ ! -z "$pid" ] && sudo kill -9 $pid fi if is_service_enabled q-meta; then - pid=$(ps aux | awk '/neutron-ns-metadata-proxy/ { print $2 }') - [ ! -z "$pid" ] && sudo kill -9 $pid + pkill -9 -f neutron-ns-metadata-proxy fi if is_service_enabled q-lbaas; then From 39d500335ad2bff0ffdf1d543d0d7528b3812480 Mon Sep 17 00:00:00 2001 From: Ana Krivokapic Date: Mon, 6 Jan 2014 21:46:35 +0100 Subject: [PATCH 0047/4119] Add missing mongodb client package on Fedora On Fedora, when ceilometer is enabled and mongodb is used as backend, devstack installation would fail due to missing mongodb client package. This patch ensures the package gets installed. 
Change-Id: I981bb55f86541e5ff19c52160269a7789b94423f --- files/rpms/ceilometer-collector | 1 + lib/ceilometer | 2 ++ 2 files changed, 3 insertions(+) diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector index d7b7ea89c1..c91bac36a2 100644 --- a/files/rpms/ceilometer-collector +++ b/files/rpms/ceilometer-collector @@ -1,3 +1,4 @@ selinux-policy-targeted mongodb-server pymongo +mongodb # NOPRIME diff --git a/lib/ceilometer b/lib/ceilometer index 75058c05a5..d0f00c07eb 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -151,6 +151,8 @@ function configure_ceilometer() { function configure_mongodb() { if is_fedora; then + # install mongodb client + install_package mongodb # ensure smallfiles selected to minimize freespace requirements sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod From 9acb965e572d672f1d5632ee92768b4708b03fbd Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Sun, 19 Jan 2014 11:05:08 +1300 Subject: [PATCH 0048/4119] Do not set bind_host for heat APIs This results in the APIs binding to 0.0.0.0 which is what other devstack services bind to anyway. 
Change-Id: Ic229dbed02b224fe7c5e14f20998bb5d5987aa39 Closes-Bug: #1172991 --- lib/heat | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/heat b/lib/heat index e35305b843..0307c64ae1 100644 --- a/lib/heat +++ b/lib/heat @@ -110,15 +110,12 @@ function configure_heat() { [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_CONF paste_deploy flavor standalone # OpenStack API - iniset $HEAT_CONF heat_api bind_host $HEAT_API_HOST iniset $HEAT_CONF heat_api bind_port $HEAT_API_PORT # Cloudformation API - iniset $HEAT_CONF heat_api_cfn bind_host $HEAT_API_CFN_HOST iniset $HEAT_CONF heat_api_cfn bind_port $HEAT_API_CFN_PORT # Cloudwatch API - iniset $HEAT_CONF heat_api_cloudwatch bind_host $HEAT_API_CW_HOST iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT # heat environment From cf903938eceb0188c9ecd405e6c89b63b1c8910d Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Mon, 20 Jan 2014 18:18:58 +0100 Subject: [PATCH 0049/4119] Added missing sudo when killing ns-metadata Closes-bug: #1269982 Change-Id: Ib6b641a8d5c92fb4a8aaed6b5d7b63e66acd6bd9 --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 465b57cc35..4b280d1d53 100644 --- a/lib/neutron +++ b/lib/neutron @@ -505,7 +505,7 @@ function stop_neutron() { [ ! -z "$pid" ] && sudo kill -9 $pid fi if is_service_enabled q-meta; then - pkill -9 -f neutron-ns-metadata-proxy + sudo pkill -9 neutron-ns-metadata-proxy || : fi if is_service_enabled q-lbaas; then From c75c78ad5d0473bc97bf859810ddfc18bf270aa2 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Tue, 21 Jan 2014 15:01:01 +0000 Subject: [PATCH 0050/4119] Add xenserver image By adding a separate entry for xenserver, it will enforce the gate to cache cirros-0.3.0-x86_64-disk.vhd.tgz. 
Change-Id: Ibfd4618e98f079a53fc286f5e95f18a3d658e4d2 --- stackrc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stackrc b/stackrc index 49fb26b2c7..8a0280ecfa 100644 --- a/stackrc +++ b/stackrc @@ -284,6 +284,9 @@ case "$VIRT_DRIVER" in vsphere) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-debian-2.6.32-i686} IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/debian-2.6.32-i686.vmdk"};; + xenserver) + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-disk} + IMAGE_URLS=${IMAGE_URLS:-"https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz"};; *) # Default to Cirros with kernel, ramdisk and disk image DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.1-x86_64-uec} IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};; From e7a94efe77bf6738fcb778f36cf18ceb82a0fae6 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 21 Jan 2014 13:17:24 -0500 Subject: [PATCH 0051/4119] disable client side libvirt debug logging and tune server side libvirt logging to the values that danpb suggested would be useful on the openstack-dev mailing list. 
Change-Id: I4b1c780d1dd4d2eecc81fabe42c07cc2a9e0e3f4 --- lib/nova | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/nova b/lib/nova index a50878950c..a4edb53cf8 100644 --- a/lib/nova +++ b/lib/nova @@ -650,12 +650,11 @@ function start_nova_compute() { if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then # Enable client side traces for libvirt - export LIBVIRT_LOG_FILTERS="1:libvirt" - export LIBVIRT_LOG_OUTPUTS="1:file:/var/log/libvirt/libvirtd-nova.log" - + local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" + local log_outputs="1:file:/var/log/libvirt/libvirtd.log" # Enable server side traces for libvirtd - echo "log_filters=\"1:libvirt 1:qemu\"" | sudo tee -a /etc/libvirt/libvirtd.conf - echo "log_outputs=\"1:file:/var/log/libvirt/libvirtd.log\"" | sudo tee -a /etc/libvirt/libvirtd.conf + echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf + echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf # The group **$LIBVIRT_GROUP** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. From 1b6ba540887ab73432488f5d81339227052c423c Mon Sep 17 00:00:00 2001 From: ZhiQiang Fan Date: Wed, 22 Jan 2014 22:56:59 +0800 Subject: [PATCH 0052/4119] Remove unnecessary slash from ceilometer endpoint The last slash in ceilometer endpoint is not needed, it should be removed because it will generate redundant slash which has been treated as a bug in ceilometer. 
Change-Id: Ifcff9b63921f5b1dda667d8e77aab22ca2928a8b Closes-Bug: #1271556 ref: https://review.openstack.org/#/c/63279/ --- lib/ceilometer | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 75058c05a5..18f146eb90 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -85,9 +85,9 @@ create_ceilometer_accounts() { keystone endpoint-create \ --region RegionOne \ --service_id $CEILOMETER_SERVICE \ - --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ - --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ - --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" + --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \ + --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \ + --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" fi fi } From 4968d1ad5d8d6b0537c68548eb5f8c08bc33f63a Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Wed, 22 Jan 2014 19:06:44 -0600 Subject: [PATCH 0053/4119] Keystone use common logging setup The Keystone setup was using logging.conf to configure logging, unlike other projects. This may have been left over from before Keystone switched to oslo logging. Switching to common logging configuration allows: - Common format for logs for easier parsing - Pretty colorized logs - Keystone can control the default logging levels for libraries that are used by setting the defaults in keystone. - Potentially using a function to setup logging for all components using oslo-logging (e.g., share with lib/nova). 
Change-Id: I4e9b1e6cffce30f16a1e039224312852b8abda07 Closes-Bug: #1271775 Closes-Bug: #1269987 --- lib/keystone | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/keystone b/lib/keystone index ceefe6a144..7f0bcf24a7 100644 --- a/lib/keystone +++ b/lib/keystone @@ -247,14 +247,14 @@ function configure_keystone() { fi # Set up logging - LOGGING_ROOT="devel" if [ "$SYSLOG" != "False" ]; then - LOGGING_ROOT="$LOGGING_ROOT,production" + iniset $KEYSTONE_CONF DEFAULT use_syslog "True" + fi + + # Format logging + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + setup_colorized_logging $KEYSTONE_CONF DEFAULT fi - KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_CONF_DIR/logging.conf" - cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf - iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG" - iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production" if is_apache_enabled_service key; then _config_keystone_apache_wsgi @@ -412,7 +412,7 @@ function start_keystone() { screen_it key "cd $KEYSTONE_DIR && sudo tail -f /var/log/$APACHE_NAME/keystone" else # Start Keystone in a screen window - screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG --debug" + screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF --debug" fi echo "Waiting for keystone to start..." From 0b8f6e0fbba961de04d95ff0e06e515d1ea3ea8b Mon Sep 17 00:00:00 2001 From: IWAMOTO Toshihiro Date: Thu, 23 Jan 2014 12:02:34 +0900 Subject: [PATCH 0054/4119] Make sure not to revert local changes. "git diff --quiet" has a bug ignoring local changes if there's a unchanged file with a newer timestamp. This patch works around the bug. 
Change-Id: I0ddc24e0f7af21287c43c1e04dd166ebff6f2dca Closes-Bug: 1264422 --- functions | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 92b61ed974..eb92a6c615 100644 --- a/functions +++ b/functions @@ -1301,7 +1301,8 @@ function setup_develop() { echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" # Don't update repo if local changes exist - (cd $project_dir && git diff --quiet) + # Don't use buggy "git diff --quiet" + (cd $project_dir && git diff --exit-code >/dev/null) local update_requirements=$? if [ $update_requirements -eq 0 ]; then From ab491bcc88acd83e9fa21de1d4a3fe60bfba577a Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Tue, 21 Jan 2014 11:18:11 +0900 Subject: [PATCH 0055/4119] Add get-pip.py/*.qcow2 to .gitignore files/get-pip.py and *.qcow2 are installed by DevStack itself. So we shouldn't manage it with the git repository. Change-Id: Ib22ed814d3d3eb33ef3ff45874b0ff36b2036cf5 --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 43652024f3..1840352636 100644 --- a/.gitignore +++ b/.gitignore @@ -7,8 +7,10 @@ src localrc local.sh files/*.gz +files/*.qcow2 files/images files/pip-* +files/get-pip.py stack-screenrc *.pem accrc From 55c468c422ae7bc48f46847d6fa21e53d4673259 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Thu, 23 Jan 2014 15:01:50 +0400 Subject: [PATCH 0056/4119] Use DATABASE/connection opt for db url in Savanna DATABASE/sql_connection opt is now deprecated. 
Change-Id: I58058f0d51e16de53e6472c8c01065438d709edc --- lib/savanna | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/savanna b/lib/savanna index bb4dfe693d..57d8ac39ce 100644 --- a/lib/savanna +++ b/lib/savanna @@ -96,8 +96,7 @@ function configure_savanna() { iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG recreate_database savanna utf8 - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database sql_connection `database_connection_url savanna` - inicomment $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database connection + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database connection `database_connection_url savanna` if is_service_enabled neutron; then iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_neutron true From fe42255bfac23a74890c2c7d8cfef385428cef32 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Thu, 23 Jan 2014 14:18:54 +0400 Subject: [PATCH 0057/4119] Use savanna-db-manage to init db for Savanna It uses alembic migrations to initialize database. 
Change-Id: I6cf01f69c6bc7c9e403040607dd397cfc3b574a4 --- lib/savanna | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/savanna b/lib/savanna index 57d8ac39ce..c7d59f79c4 100644 --- a/lib/savanna +++ b/lib/savanna @@ -95,7 +95,6 @@ function configure_savanna() { iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG - recreate_database savanna utf8 iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database connection `database_connection_url savanna` if is_service_enabled neutron; then @@ -104,6 +103,9 @@ function configure_savanna() { fi iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG + + recreate_database savanna utf8 + $SAVANNA_BIN_DIR/savanna-db-manage --config-file $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE upgrade head } # install_savanna() - Collect source and prepare From 579af5d6786f62008807a473749600e88cea21fc Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 23 Jan 2014 11:32:22 -0600 Subject: [PATCH 0058/4119] Kill process groups in screen_stop() Previously only the top child process was killed, killing the process group also takes all of the child processes with it. 
Closes-bug: 1271889 Change-Id: If1864cc4f1944f417ea3473d81d8b6e8e40030c2 --- functions | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 92b61ed974..13d021e147 100644 --- a/functions +++ b/functions @@ -1150,6 +1150,9 @@ function screen_it { # Stop a service in screen +# If a PID is available use it, kill the whole process group via TERM +# If screen is being used kill the screen window; this will catch processes +# that did not leave a PID behind # screen_stop service function screen_stop() { SCREEN_NAME=${SCREEN_NAME:-stack} @@ -1159,7 +1162,7 @@ function screen_stop() { if is_service_enabled $1; then # Kill via pid if we have one available if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then - pkill -TERM -P $(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid) + pkill -TERM -P -$(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid) rm $SERVICE_DIR/$SCREEN_NAME/$1.pid fi if [[ "$USE_SCREEN" = "True" ]]; then From c3e5b77b45068ed07e53fdda1276f5c863de5973 Mon Sep 17 00:00:00 2001 From: Flavio Percoco Date: Thu, 23 Jan 2014 13:48:16 +0100 Subject: [PATCH 0059/4119] Add missing file argument to iniset_multiline Change Id9aab356b36b2150312324a0349d120bbbbd4e63 introduced a call to iniset_multiline to enable swift stores explicitly. However, the call has a missing file argument which resulted in this call setting the values to the wrong file, section and param. This patch fixes that. 
Change-Id: Ib17048e05c467bc8ca2c13fe4297d6bac6c8a880 --- lib/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index 21c1fa595a..55d5fb37ec 100644 --- a/lib/glance +++ b/lib/glance @@ -125,7 +125,7 @@ function configure_glance() { iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True - iniset_multiline DEFAULT known_stores glance.store.filesystem.Store glance.store.http.Store glance.store.swift.Store + iniset $GLANCE_API_CONF DEFAULT known_stores "glance.store.filesystem.Store, glance.store.http.Store, glance.store.swift.Store" fi cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI From bdeadf59d4273515df0f47edb820ff159bbc5380 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Thu, 23 Jan 2014 17:41:18 +0000 Subject: [PATCH 0060/4119] Add pidstat support pidstat is a script that comes from sysstat, but will give us per-process information. Allow enabling "pidstat" that will run pidstat to give info every 5 seconds by default. 
Change-Id: I5ec7d5abce81125b55985bba3ccaf8073ccdfa2a --- stack.sh | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/stack.sh b/stack.sh index efdee64b34..1d02c16ff8 100755 --- a/stack.sh +++ b/stack.sh @@ -291,6 +291,9 @@ SYSLOG_PORT=${SYSLOG_PORT:-516} SYSSTAT_FILE=${SYSSTAT_FILE:-"sysstat.dat"} SYSSTAT_INTERVAL=${SYSSTAT_INTERVAL:-"1"} +PIDSTAT_FILE=${PIDSTAT_FILE:-"pidstat.txt"} +PIDSTAT_INTERVAL=${PIDSTAT_INTERVAL:-"5"} + # Use color for logging output (only available if syslog is not used) LOG_COLOR=`trueorfalse True $LOG_COLOR` @@ -874,6 +877,16 @@ if is_service_enabled sysstat; then fi fi +if is_service_enabled pidstat; then + # Per-process stats + PIDSTAT_OPTS="-l -p ALL -T ALL" + if [[ -n ${SCREEN_LOGDIR} ]]; then + screen_it pidstat "cd $TOP_DIR; pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL > $SCREEN_LOGDIR/$PIDSTAT_FILE" + else + screen_it pidstat "pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL" + fi +fi + # Start Services # ============== From b93cd643432d3633c48bec02fcd7cb4f354f67ed Mon Sep 17 00:00:00 2001 From: Arnaud Legendre Date: Thu, 23 Jan 2014 17:12:21 -0800 Subject: [PATCH 0061/4119] upload_image.sh should parse filenames correctly The upload_image script gives the ability to the user to provide specific metadata using the filename: file-adapter_type;disk_type;network_type.vmdk Currently, the regex expects each of these types to be populated. This patch fixes this issue by making the regex more flexible and accepts only one of these metadata to be populated. Change-Id: If74cb06cc640864e7e91fd88943cdb37e05935d6 Closes-Bug: #1272126 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 92b61ed974..276cea1e04 100644 --- a/functions +++ b/functions @@ -1539,7 +1539,7 @@ function upload_image() { # NOTE: For backwards compatibility reasons, colons may be used in place # of semi-colons for property delimiters but they are not permitted # characters in NTFS filesystems. 
- property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).+[:;].+[:;].+$'` + property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$'` IFS=':;' read -a props <<< "$property_string" vmdk_disktype="${props[0]:-$vmdk_disktype}" vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}" From ab0595e9cd8f9bc77a3bb7e6c9611c2c771b0781 Mon Sep 17 00:00:00 2001 From: Gordon Chung Date: Thu, 16 Jan 2014 09:44:57 -0500 Subject: [PATCH 0062/4119] ERRORs in ceilometer-acentral log after succesful tempest run recent merge added duplicate creation of ceilometer user. remove ceilometer user creation from keystone_data so we can correctly add ResellerAdmin role to ceilometer user which it needs to interact with swift Change-Id: I043c6b9337dfb147c3c8f364b462708a4030b41c Closes-Bug: #1268730 --- files/keystone_data.sh | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 07b6b601d2..d477c42906 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -6,7 +6,6 @@ # ------------------------------------------------------------------ # service glance admin # service heat service # if enabled -# service ceilometer admin # if enabled # Tempest Only: # alt_demo alt_demo Member # @@ -113,30 +112,11 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then fi # Ceilometer -if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then - keystone user-create --name=ceilometer \ - --pass="$SERVICE_PASSWORD" \ - --tenant $SERVICE_TENANT_NAME \ - --email=ceilometer@example.com - keystone user-role-add --tenant $SERVICE_TENANT_NAME \ - --user ceilometer \ - --role admin +if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then # Ceilometer needs ResellerAdmin role to access swift account stats. 
keystone user-role-add --tenant $SERVICE_TENANT_NAME \ --user ceilometer \ --role ResellerAdmin - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - keystone service-create \ - --name=ceilometer \ - --type=metering \ - --description="Ceilometer Service" - keystone endpoint-create \ - --region RegionOne \ - --service ceilometer \ - --publicurl "http://$SERVICE_HOST:8777" \ - --adminurl "http://$SERVICE_HOST:8777" \ - --internalurl "http://$SERVICE_HOST:8777" - fi fi # EC2 From dc4dc7f03335e26ea3d86b6184f0475cc5f3d51b Mon Sep 17 00:00:00 2001 From: john-griffith Date: Wed, 22 Jan 2014 18:09:32 -0700 Subject: [PATCH 0063/4119] Fix up tempest conf settings The tempest api.volume.test_volume_types test won't work with non-default drivers configured for cinder's backend any more. The reason is that we create a type using capability scheduler keywords in the extra-specs for the test; (vendor_name and storage_protocol). The result is the extra-spec uses the filters: "vendor_name=Open Source" and "storage_protocol=iSCSI", but for example if you have another backend say SolidFire, EMC, NetApp, IBM etc the capabilities filter will fail the create with a "No valid host available". This is intended to work by simply setting these values in your tempest.conf file. That's fine, however upon setting this up in my localrc I found that the tempest config variables being set via devtsack were never picked up Currently devstack doesn't use the same variable names for configuration variables as tempest expects. Devstack is using the variable "TEMPEST_CONF" however the Tempest project is expecting the variable "TEMPEST_CONFIG", so currently the devstack lib/tempest rc variables are never picked up by tempest properly. This change modifes devstack's naming of TEMPEST_CONF, my though being that since this doesn't work in devstack currently that changing it here would be better than changing it in Tempest where it's possible people had their own custoizations already outside of devstack. 
In addition this change creates rc variables in devstack to actually set these via devstack. The idea here is that Cinder 3'rd party testing needs to be a simple devstack config and run stack.sh. By fixing up the configuration file variable naming and adding the variables for the vendor and protocol settings that's now possible. An example localrc for a custom config is shown below. The example sets the tempest config file to /etc/tempest/tempest.conf, and configures tempest to use the SolidFire driver as the cinder backend. TEMPEST_VOLUME_VENDOR ==> tempest.conf.volume_vendor TEMPEST_STORAGE_PROTOCOL ==> tempest.conf.storage_protocol relevant example localrc entries: TEMPEST_CONFIG=/etc/tempest/tempest.conf TEMPEST_CONFIG_DIR=/etc/tempest TEMPEST_VOLUME_DRIVER=solidfire TEMPEST_VOLUME_VENDOR="SolidFire Inc" ***NOTE*** storage_protocol and vendor_name MUST match what the backend device reports from get capabilities. Change-Id: I28dfa90c877b27f5d4919f2748fae092bb2f87fa Closes-Bug: 1271781 --- lib/tempest | 141 +++++++++++++++++++++++++++++----------------------- 1 file changed, 78 insertions(+), 63 deletions(-) diff --git a/lib/tempest b/lib/tempest index ef9dfe218b..a13cf10e84 100644 --- a/lib/tempest +++ b/lib/tempest @@ -46,8 +46,8 @@ set +o xtrace # Set up default directories TEMPEST_DIR=$DEST/tempest -TEMPEST_CONF_DIR=$TEMPEST_DIR/etc -TEMPEST_CONF=$TEMPEST_CONF_DIR/tempest.conf +TEMPEST_CONFIG_DIR=${TEMPEST_CONFIG_DIR:-$TEMPEST_DIR/etc} +TEMPEST_CONFIG=$TEMPEST_CONFIG_DIR/tempest.conf TEMPEST_STATE_PATH=${TEMPEST_STATE_PATH:=$DATA_DIR/tempest} NOVA_SOURCE_DIR=$DEST/nova @@ -58,6 +58,10 @@ BUILD_TIMEOUT=196 BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-0.3.1" +# Cinder/Volume variables +TEMPEST_VOLUME_DRIVER=${TEMPEST_VOLUME_DRIVER:-default} +TEMPEST_VOLUME_VENDOR=${TEMPEST_VOLUME_VENDOR:-"Open Source"} +TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-iSCSI} # Functions # --------- @@ -83,6 +87,11 @@ function configure_tempest() { local 
boto_instance_type="m1.tiny" local ssh_connect_method="fixed" + if [[ ! -d $TEMPEST_CONFIG_DIR ]]; then + sudo mkdir -p $TEMPEST_CONFIG_DIR + fi + sudo chown $STACK_USER $TEMPEST_CONFIG_DIR + # TODO(afazekas): # sudo python setup.py deploy @@ -133,7 +142,8 @@ function configure_tempest() { # Create tempest.conf from tempest.conf.sample # copy every time, because the image UUIDS are going to change - cp $TEMPEST_CONF.sample $TEMPEST_CONF + sudo cp $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG + sudo chmod 644 $TEMPEST_CONFIG password=${ADMIN_PASSWORD:-secrete} @@ -224,121 +234,126 @@ function configure_tempest() { fi # Oslo - iniset $TEMPEST_CONF DEFAULT lock_path $TEMPEST_STATE_PATH + iniset $TEMPEST_CONFIG DEFAULT lock_path $TEMPEST_STATE_PATH mkdir -p $TEMPEST_STATE_PATH - iniset $TEMPEST_CONF DEFAULT use_stderr False - iniset $TEMPEST_CONF DEFAULT log_file tempest.log - iniset $TEMPEST_CONF DEFAULT debug True + iniset $TEMPEST_CONFIG DEFAULT use_stderr False + iniset $TEMPEST_CONFIG DEFAULT log_file tempest.log + iniset $TEMPEST_CONFIG DEFAULT debug True # Timeouts - iniset $TEMPEST_CONF compute build_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONF volume build_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONF boto build_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONF compute build_interval $BUILD_INTERVAL - iniset $TEMPEST_CONF volume build_interval $BUILD_INTERVAL - iniset $TEMPEST_CONF boto build_interval $BUILD_INTERVAL - iniset $TEMPEST_CONF boto http_socket_timeout 5 + iniset $TEMPEST_CONFIG compute build_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONFIG volume build_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONFIG boto build_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONFIG compute build_interval $BUILD_INTERVAL + iniset $TEMPEST_CONFIG volume build_interval $BUILD_INTERVAL + iniset $TEMPEST_CONFIG boto build_interval $BUILD_INTERVAL + iniset $TEMPEST_CONFIG boto http_socket_timeout 5 # Identity - iniset $TEMPEST_CONF identity uri 
"$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/" - iniset $TEMPEST_CONF identity password "$password" - iniset $TEMPEST_CONF identity alt_username $ALT_USERNAME - iniset $TEMPEST_CONF identity alt_password "$password" - iniset $TEMPEST_CONF identity alt_tenant_name $ALT_TENANT_NAME - iniset $TEMPEST_CONF identity admin_password "$password" + iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/" + iniset $TEMPEST_CONFIG identity password "$password" + iniset $TEMPEST_CONFIG identity alt_username $ALT_USERNAME + iniset $TEMPEST_CONFIG identity alt_password "$password" + iniset $TEMPEST_CONFIG identity alt_tenant_name $ALT_TENANT_NAME + iniset $TEMPEST_CONFIG identity admin_password "$password" # Image # for the gate we want to be able to override this variable so we aren't # doing an HTTP fetch over the wide internet for this test if [[ ! -z "$TEMPEST_HTTP_IMAGE" ]]; then - iniset $TEMPEST_CONF image http_image $TEMPEST_HTTP_IMAGE + iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE fi # Compute - iniset $TEMPEST_CONF compute change_password_available False + iniset $TEMPEST_CONFIG compute change_password_available False # Note(nati) current tempest don't create network for each tenant # so reuse same tenant for now if is_service_enabled neutron; then TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-False} fi - iniset $TEMPEST_CONF compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} - iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED - iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME - iniset $TEMPEST_CONF compute ip_version_for_ssh 4 - iniset $TEMPEST_CONF compute ssh_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONF compute image_ref $image_uuid - iniset $TEMPEST_CONF compute image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} - iniset $TEMPEST_CONF compute image_ref_alt $image_uuid_alt - iniset $TEMPEST_CONF compute 
image_alt_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} - iniset $TEMPEST_CONF compute flavor_ref $flavor_ref - iniset $TEMPEST_CONF compute flavor_ref_alt $flavor_ref_alt - iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} - iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} - iniset $TEMPEST_CONF compute ssh_connect_method $ssh_connect_method + iniset $TEMPEST_CONFIG compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} + iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED + iniset $TEMPEST_CONFIG compute network_for_ssh $PRIVATE_NETWORK_NAME + iniset $TEMPEST_CONFIG compute ip_version_for_ssh 4 + iniset $TEMPEST_CONFIG compute ssh_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONFIG compute image_ref $image_uuid + iniset $TEMPEST_CONFIG compute image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} + iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid_alt + iniset $TEMPEST_CONFIG compute image_alt_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} + iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref + iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt + iniset $TEMPEST_CONFIG compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} + iniset $TEMPEST_CONFIG compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} + iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method # Compute admin - iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED + iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED - iniset $TEMPEST_CONF network api_version 2.0 - iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable" - iniset $TEMPEST_CONF network public_network_id "$public_network_id" - iniset $TEMPEST_CONF network public_router_id "$public_router_id" - iniset $TEMPEST_CONF network default_network 
"$FIXED_RANGE" + iniset $TEMPEST_CONFIG network api_version 2.0 + iniset $TEMPEST_CONFIG network tenant_networks_reachable "$tenant_networks_reachable" + iniset $TEMPEST_CONFIG network public_network_id "$public_network_id" + iniset $TEMPEST_CONFIG network public_router_id "$public_router_id" + iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE" # boto - iniset $TEMPEST_CONF boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" - iniset $TEMPEST_CONF boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}" - iniset $TEMPEST_CONF boto s3_materials_path "$BOTO_MATERIALS_PATH" - iniset $TEMPEST_CONF boto instance_type "$boto_instance_type" - iniset $TEMPEST_CONF boto http_socket_timeout 30 - iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} + iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" + iniset $TEMPEST_CONFIG boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}" + iniset $TEMPEST_CONFIG boto s3_materials_path "$BOTO_MATERIALS_PATH" + iniset $TEMPEST_CONFIG boto instance_type "$boto_instance_type" + iniset $TEMPEST_CONFIG boto http_socket_timeout 30 + iniset $TEMPEST_CONFIG boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # Orchestration test image if [[ ! 
-z "$HEAT_FETCHED_TEST_IMAGE" ]]; then - iniset $TEMPEST_CONF orchestration image_ref "$HEAT_FETCHED_TEST_IMAGE" + iniset $TEMPEST_CONFIG orchestration image_ref "$HEAT_FETCHED_TEST_IMAGE" elif [[ "$HEAT_CREATE_TEST_IMAGE" = "True" ]]; then disk_image_create /usr/share/tripleo-image-elements "vm fedora heat-cfntools" "i386" "fedora-vm-heat-cfntools-tempest" - iniset $TEMPEST_CONF orchestration image_ref "fedora-vm-heat-cfntools-tempest" + iniset $TEMPEST_CONFIG orchestration image_ref "fedora-vm-heat-cfntools-tempest" fi # Scenario - iniset $TEMPEST_CONF scenario img_dir "$FILES/images/cirros-0.3.1-x86_64-uec" + iniset $TEMPEST_CONFIG scenario img_dir "$FILES/images/cirros-0.3.1-x86_64-uec" # Large Ops Number - iniset $TEMPEST_CONF scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} + iniset $TEMPEST_CONFIG scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} # Volume if is_service_enabled c-bak; then - iniset $TEMPEST_CONF volume volume_backup_enabled "True" + iniset $TEMPEST_CONFIG volume volume_backup_enabled "True" fi CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) if [ $CINDER_MULTI_LVM_BACKEND == "True" ]; then - iniset $TEMPEST_CONF volume multi_backend_enabled "True" - iniset $TEMPEST_CONF volume backend1_name "LVM_iSCSI" - iniset $TEMPEST_CONF volume backend2_name "LVM_iSCSI_2" + iniset $TEMPEST_CONFIG volume multi_backend_enabled "True" + iniset $TEMPEST_CONFIG volume backend1_name "LVM_iSCSI" + iniset $TEMPEST_CONFIG volume backend2_name "LVM_iSCSI_2" + fi + + if [ $TEMPEST_VOLUME_DRIVER != "default" ]; then + iniset $TEMPEST_CONFIG volume vendor_name $TEMPEST_VOLUME_VENDOR + iniset $TEMPEST_CONFIG volume storage_protocol $TEMPEST_STORAGE_PROTOCOL fi # Dashboard - iniset $TEMPEST_CONF dashboard dashboard_url "http://$SERVICE_HOST/" - iniset $TEMPEST_CONF dashboard login_url "http://$SERVICE_HOST/auth/login/" + iniset $TEMPEST_CONFIG dashboard dashboard_url "http://$SERVICE_HOST/" + iniset $TEMPEST_CONFIG dashboard 
login_url "http://$SERVICE_HOST/auth/login/" # cli - iniset $TEMPEST_CONF cli cli_dir $NOVA_BIN_DIR + iniset $TEMPEST_CONFIG cli cli_dir $NOVA_BIN_DIR # Networking - iniset $TEMPEST_CONF network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}" + iniset $TEMPEST_CONFIG network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}" # service_available for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna trove marconi; do if is_service_enabled $service ; then - iniset $TEMPEST_CONF service_available $service "True" + iniset $TEMPEST_CONFIG service_available $service "True" else - iniset $TEMPEST_CONF service_available $service "False" + iniset $TEMPEST_CONFIG service_available $service "False" fi done echo "Created tempest configuration file:" - cat $TEMPEST_CONF + cat $TEMPEST_CONFIG # Restore IFS IFS=$ifs From db20cd5436ec6301b134f2d92053cb98fb15717b Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Sun, 5 Jan 2014 07:41:30 -0800 Subject: [PATCH 0064/4119] Add Neutron/NSX plugin sanity check Supports-blueprint: nvp-third-party-support (aka bp vmware-nsx-third-party) Related-bug: #1265671 Change-Id: Ifa4e1d36b8735e81f24b8852103a9c433d736e84 --- lib/neutron_thirdparty/vmware_nsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx index 7c6202723f..4eb177a458 100644 --- a/lib/neutron_thirdparty/vmware_nsx +++ b/lib/neutron_thirdparty/vmware_nsx @@ -79,7 +79,7 @@ function stop_vmware_nsx() { } function check_vmware_nsx() { - : + neutron-check-nsx-config $NEUTRON_CONF_DIR/plugins/vmware/nsx.ini } # Restore xtrace From 53ffc713b1d352a9ecf701b452e8e6659daf9748 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 17 Dec 2013 11:13:40 -0600 Subject: [PATCH 0065/4119] clean.sh updates * Clean out data, log and state dirs * Include lib/apache to clear is_apache_enabled_service not found error * Clean errors removing tgt config 
files * Clean errors removing VG backing file in lib/cinder Change-Id: I33dfde17eb8daaaed7f7e76337fe6a8085a266bf --- clean.sh | 26 ++++++++++++++++---------- lib/cinder | 4 ++-- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/clean.sh b/clean.sh index 480a81214f..e16bdb7f36 100755 --- a/clean.sh +++ b/clean.sh @@ -30,13 +30,17 @@ fi # and ``DISTRO`` GetDistro +# Import apache functions +source $TOP_DIR/lib/apache +source $TOP_DIR/lib/ldap # Import database library source $TOP_DIR/lib/database source $TOP_DIR/lib/rpc_backend -source $TOP_DIR/lib/oslo source $TOP_DIR/lib/tls + +source $TOP_DIR/lib/oslo source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance @@ -47,7 +51,9 @@ source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal -source $TOP_DIR/lib/ldap +source $TOP_DIR/lib/ironic +source $TOP_DIR/lib/trove + # Extras Source # -------------- @@ -95,13 +101,6 @@ if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then cleanup_nova_hypervisor fi -# cinder doesn't always clean up the volume group as it might be used elsewhere... -# clean it up if it is a loop device -VG_DEV=$(sudo losetup -j $DATA_DIR/${VOLUME_GROUP}-backing-file | awk -F':' '/backing-file/ { print $1}') -if [[ -n "$VG_DEV" ]]; then - sudo losetup -d $VG_DEV -fi - #if mount | grep $DATA_DIR/swift/drives; then # sudo umount $DATA_DIR/swift/drives/sdb1 #fi @@ -111,12 +110,19 @@ fi sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift # Clean out tgt -sudo rm /etc/tgt/conf.d/* +sudo rm -f /etc/tgt/conf.d/* # Clean up the message queue cleanup_rpc_backend cleanup_database +# Clean out data, logs and status +LOGDIR=$(dirname "$LOGFILE") +sudo rm -rf $DATA_DIR $LOGDIR $DEST/status +if [[ -n "$SCREEN_LOGDIR" ]] && [[ -d "$SCREEN_LOGDIR" ]]; then + sudo rm -rf $SCREEN_LOGDIR +fi + # Clean up networking... # should this be in nova? 
# FIXED_IP_ADDR in br100 diff --git a/lib/cinder b/lib/cinder index cbe732e9b0..45a9a25dc8 100644 --- a/lib/cinder +++ b/lib/cinder @@ -109,8 +109,8 @@ function _clean_lvm_backing_file() { # of the backing file if [ -z "`sudo lvs --noheadings -o lv_name $vg`" ]; then # if the backing physical device is a loop device, it was probably setup by devstack - VG_DEV=$(sudo losetup -j $DATA_DIR/${vg}-backing-file | awk -F':' '/backing-file/ { print $1}') - if [[ -n "$VG_DEV" ]]; then + if [[ -n "$VG_DEV" ]] && [[ -e "$VG_DEV" ]]; then + VG_DEV=$(sudo losetup -j $DATA_DIR/${vg}-backing-file | awk -F':' '/backing-file/ { print $1}') sudo losetup -d $VG_DEV rm -f $DATA_DIR/${vg}-backing-file fi From 38e38fb16d5d597e41c486812ae7ba480696b31c Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 10 Jan 2014 12:05:51 -0600 Subject: [PATCH 0066/4119] Update samples * Skip commands for services that are not started in local.sh * Rename localrc to local.conf Change-Id: Ida3a8cc836d56db94da4a133fbeb81c7f5fc5f26 --- samples/{localrc => local.conf} | 13 ++++--- samples/local.sh | 60 +++++++++++++++++---------------- 2 files changed, 39 insertions(+), 34 deletions(-) rename samples/{localrc => local.conf} (87%) diff --git a/samples/localrc b/samples/local.conf similarity index 87% rename from samples/localrc rename to samples/local.conf index 80cf0e75ac..c8126c22af 100644 --- a/samples/localrc +++ b/samples/local.conf @@ -1,19 +1,22 @@ -# Sample ``localrc`` for user-configurable variables in ``stack.sh`` +# Sample ``local.conf`` for user-configurable variables in ``stack.sh`` # NOTE: Copy this file to the root ``devstack`` directory for it to # work properly. -# ``localrc`` is a user-maintained setings file that is sourced from ``stackrc``. +# ``local.conf`` is a user-maintained setings file that is sourced from ``stackrc``. # This gives it the ability to override any variables set in ``stackrc``. 
# Also, most of the settings in ``stack.sh`` are written to only be set if no -# value has already been set; this lets ``localrc`` effectively override the +# value has already been set; this lets ``local.conf`` effectively override the # default values. # This is a collection of some of the settings we have found to be useful # in our DevStack development environments. Additional settings are described -# in http://devstack.org/localrc.html +# in http://devstack.org/local.conf.html # These should be considered as samples and are unsupported DevStack code. +# The ``localrc`` section replaces the old ``localrc`` configuration file. +# Note that if ``localrc`` is present it will be used in favor of this section. +[[local|localrc]] # Minimal Contents # ---------------- @@ -22,7 +25,7 @@ # there are a few minimal variables set: # If the ``*_PASSWORD`` variables are not set here you will be prompted to enter -# values for them by ``stack.sh`` and they will be added to ``localrc``. +# values for them by ``stack.sh`` and they will be added to ``local.conf``. 
ADMIN_PASSWORD=nomoresecrete MYSQL_PASSWORD=stackdb RABBIT_PASSWORD=stackqueue diff --git a/samples/local.sh b/samples/local.sh index 970cbb97e0..664cb663fe 100755 --- a/samples/local.sh +++ b/samples/local.sh @@ -23,45 +23,47 @@ source $TOP_DIR/stackrc # Destination path for installation ``DEST`` DEST=${DEST:-/opt/stack} +if is_service_enabled nova; then -# Import ssh keys -# --------------- + # Import ssh keys + # --------------- -# Import keys from the current user into the default OpenStack user (usually -# ``demo``) + # Import keys from the current user into the default OpenStack user (usually + # ``demo``) -# Get OpenStack auth -source $TOP_DIR/openrc + # Get OpenStack user auth + source $TOP_DIR/openrc -# Add first keypair found in localhost:$HOME/.ssh -for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do - if [[ -r $i ]]; then - nova keypair-add --pub_key=$i `hostname` - break - fi -done + # Add first keypair found in localhost:$HOME/.ssh + for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do + if [[ -r $i ]]; then + nova keypair-add --pub_key=$i `hostname` + break + fi + done -# Create A Flavor -# --------------- + # Create A Flavor + # --------------- -# Get OpenStack admin auth -source $TOP_DIR/openrc admin admin + # Get OpenStack admin auth + source $TOP_DIR/openrc admin admin -# Name of new flavor -# set in ``localrc`` with ``DEFAULT_INSTANCE_TYPE=m1.micro`` -MI_NAME=m1.micro + # Name of new flavor + # set in ``localrc`` with ``DEFAULT_INSTANCE_TYPE=m1.micro`` + MI_NAME=m1.micro -# Create micro flavor if not present -if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then - nova flavor-create $MI_NAME 6 128 0 1 -fi + # Create micro flavor if not present + if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then + nova flavor-create $MI_NAME 6 128 0 1 + fi -# Other Uses -# ---------- + # Other Uses + # ---------- -# Add tcp/22 and icmp to default security group -nova secgroup-add-rule default tcp 22 22 0.0.0.0/0 -nova secgroup-add-rule default icmp 
-1 -1 0.0.0.0/0 + # Add tcp/22 and icmp to default security group + nova secgroup-add-rule default tcp 22 22 0.0.0.0/0 + nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0 +fi From fbe12f988cd1026b2f074a5b5bfe15ff19171b90 Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Sun, 26 Jan 2014 22:57:47 +0900 Subject: [PATCH 0067/4119] Add 'bc' to files/apts/general After commit def4c141, "bc" command is used in stack.sh, but 'bc' command is not available in very minimal Ubuntu installation (without any tasks installed). We need to add 'bc' to required package list. Closes-Bug: #1272914 Change-Id: I5797707e8eaa9dd2a21d1a1fc3af028d1951a2ee --- files/apts/general | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apts/general b/files/apts/general index aff687fab4..32d31f0642 100644 --- a/files/apts/general +++ b/files/apts/general @@ -21,3 +21,4 @@ euca2ools # only for testing client tar python-cmd2 # dist:precise python2.7 +bc From c38d864cfb43592a4985441cc5c3de89d572c32e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Sun, 26 Jan 2014 13:01:30 -0500 Subject: [PATCH 0068/4119] remove setting up fatal_deprecations using fatal deprecations only means you can't add new deprecations to the code base, which isn't helpful in actually deprecating features in a user friendly way. 
Change-Id: I26468f4c221a14f2eea746439d46e5fa192cfc57 --- lib/nova | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/nova b/lib/nova index a4edb53cf8..b85f0941f3 100644 --- a/lib/nova +++ b/lib/nova @@ -379,7 +379,6 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT metadata_workers "4" iniset $NOVA_CONF conductor workers "4" iniset $NOVA_CONF DEFAULT sql_connection `database_connection_url nova` - iniset $NOVA_CONF DEFAULT fatal_deprecations "True" iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF osapi_v3 enabled "True" From e61bc61a31ba05c9af5d0801d2f120e919e0bd5f Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Mon, 27 Jan 2014 15:21:29 +1300 Subject: [PATCH 0069/4119] Use HOST_IP instead of SERVICE_HOST for heat API conf Heat config values heat_metadata_server_url, heat_waitcondition_server_url and heat_watch_server_url currently derive their host from devstack SERVICE_HOST. In gating this is set to 127.0.0.1, which would explain why nova servers are not reaching heat with waitcondition signalling. This change uses HOST_IP as the default instead of SERVICE_HOST. 
Change-Id: I373b086e3a36a3484cfd34f0d1c8c168ac6d465d --- lib/heat | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/heat b/lib/heat index 0307c64ae1..b9b8aa66ca 100644 --- a/lib/heat +++ b/lib/heat @@ -60,13 +60,13 @@ function configure_heat() { # remove old config files rm -f $HEAT_CONF_DIR/heat-*.conf - HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$SERVICE_HOST} + HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$HOST_IP} HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000} HEAT_ENGINE_HOST=${HEAT_ENGINE_HOST:-$SERVICE_HOST} HEAT_ENGINE_PORT=${HEAT_ENGINE_PORT:-8001} - HEAT_API_CW_HOST=${HEAT_API_CW_HOST:-$SERVICE_HOST} + HEAT_API_CW_HOST=${HEAT_API_CW_HOST:-$HOST_IP} HEAT_API_CW_PORT=${HEAT_API_CW_PORT:-8003} - HEAT_API_HOST=${HEAT_API_HOST:-$SERVICE_HOST} + HEAT_API_HOST=${HEAT_API_HOST:-$HOST_IP} HEAT_API_PORT=${HEAT_API_PORT:-8004} HEAT_API_PASTE_FILE=$HEAT_CONF_DIR/api-paste.ini HEAT_POLICY_FILE=$HEAT_CONF_DIR/policy.json From daa9a734e2fe008a32ed0f98501e2ce2f80167c8 Mon Sep 17 00:00:00 2001 From: Hirofumi Ichihara Date: Mon, 27 Jan 2014 14:54:02 +0900 Subject: [PATCH 0070/4119] Repeatedly add log_filters,log_outputs to libvirtd.conf when restart Change-Id: I14f07f3164f9201305ed1e94e9277a5a5792e850 Closes-bug: 1273058 --- lib/nova | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index b85f0941f3..dbaa3f53d9 100644 --- a/lib/nova +++ b/lib/nova @@ -652,8 +652,12 @@ function start_nova_compute() { local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" local log_outputs="1:file:/var/log/libvirt/libvirtd.log" # Enable server side traces for libvirtd - echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf - echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf + if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then + echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + if ! 
grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then + echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi # The group **$LIBVIRT_GROUP** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. From 315f7b0747effbd490ff3b25d85bc6399ed290a1 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 27 Jan 2014 09:40:29 +0100 Subject: [PATCH 0071/4119] Use service postgresql initdb with el6 postgresql-setup does not exist on el6, the service postgresql initdb is the documented db init command. Change-Id: I2b92a3c8e7db603eb13378e46893fc81f507405b --- lib/databases/postgresql | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 60e5a33715..c459feb9e0 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -43,7 +43,13 @@ function configure_database_postgresql { if is_fedora; then PG_HBA=/var/lib/pgsql/data/pg_hba.conf PG_CONF=/var/lib/pgsql/data/postgresql.conf - sudo [ -e $PG_HBA ] || sudo postgresql-setup initdb + if ! sudo [ -e $PG_HBA ]; then + if ! [[ $DISTRO =~ (rhel6) ]]; then + sudo postgresql-setup initdb + else + sudo service postgresql initdb + fi + fi elif is_ubuntu; then PG_DIR=`find /etc/postgresql -name pg_hba.conf|xargs dirname` PG_HBA=$PG_DIR/pg_hba.conf From e7b6399d455ea3f44c46448449cc90d55356f23e Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 27 Jan 2014 11:44:03 +0100 Subject: [PATCH 0072/4119] Install bc with all distribution After commit def4c141 the bc is required for devstack install on minimal image, commit fbe12f98 fixed the issue with ubuntu, but not with other distributions. Adding bc to the files/rpms-suse/general and files/rpms/general. 
Change-Id: Ieb2e3e2af454bca03bb3d7565ff731dc357e699f --- files/rpms-suse/general | 1 + files/rpms/general | 1 + 2 files changed, 2 insertions(+) diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 98c279581e..704947ea53 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -17,6 +17,7 @@ tcpdump unzip vim-enhanced wget +bc findutils-locate # useful when debugging lsof # useful when debugging diff --git a/files/rpms/general b/files/rpms/general index 40246ea4ab..6cfe31eaf1 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -21,6 +21,7 @@ tcpdump unzip wget which +bc # [1] : some of installed tools have unversioned dependencies on this, # but others have versioned (<=0.7). So if a later version (0.7.1) From d8416d7c1c71c82fa9c0f0e7a6518ce043bff120 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 27 Jan 2014 15:36:06 -0500 Subject: [PATCH 0073/4119] allow for upgrade of the precise kernel we are getting kernel crashes in the OpenStack gate, to test getting around this we'd like devstack to be able to upgrade the precise kernel to the latest lts backported kernel. default to off Change-Id: I5d47aa8d15b1b1c0386a13b65022f6b8108c5c49 --- tools/fixup_stuff.sh | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 5fb47dc29b..a28e10ef2d 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -69,6 +69,22 @@ if [[ -d $dir ]]; then sudo chmod +r $dir/* fi +# Ubuntu 12.04 +# ----- +# We can regularly get kernel crashes on the 12.04 default kernel, so attempt +# to install a new kernel +if [[ ${DISTRO} =~ (precise) ]]; then + # Finally, because we suspect the Precise kernel is problematic, install a new kernel + UPGRADE_KERNEL=$(trueorfalse False $UPGRADE_KERNEL) + if [[ $UPGRADE_KERNEL == "True" ]]; then + if [[ ! 
`uname -r` =~ (^3\.11) ]]; then + apt_get install linux-generic-lts-saucy + echo "Installing Saucy LTS kernel, please reboot before proceeding" + exit 1 + fi + fi +fi + # RHEL6 # ----- From bb8227ce69b9b040b98dbe339e4f5c02172d19ac Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 27 Jan 2014 12:21:52 -0600 Subject: [PATCH 0074/4119] Fix Swift process kill stop_swift() was not killing all swift processes properly. Change to manually clean up all screen services with pkill. Closes-bug: 1268794 Change-Id: Ibb7a2e0dd10a313609f05963264087f82f6f00e2 --- lib/swift | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index 44c230be93..37b630c3fa 100644 --- a/lib/swift +++ b/lib/swift @@ -652,8 +652,10 @@ function stop_swift() { if type -p swift-init >/dev/null; then swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true fi - # Dump the proxy server - sudo pkill -f swift-proxy-server + for type in proxy object container account; do + # Dump all of the servers + pkill -f swift- + done } # Restore xtrace From fc744f9713fcccfebeb52e35c7fc1ce955b89200 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 27 Jan 2014 13:45:21 -0600 Subject: [PATCH 0075/4119] Convert trove to plugin Also adds an is_trove_enabled() function to match https://review.openstack.org/69497 changes for is_service_enabled(). 
Change-Id: Ic0408ff6d9816aec8a3506931470470342a5dcd7 --- extras.d/70-trove | 33 +++++++++++++++++++++++++++++++++ lib/trove | 10 ++++++++++ stack.sh | 26 +------------------------- unstack.sh | 4 ---- 4 files changed, 44 insertions(+), 29 deletions(-) create mode 100644 extras.d/70-trove diff --git a/extras.d/70-trove b/extras.d/70-trove new file mode 100644 index 0000000000..a4dc7fbc5b --- /dev/null +++ b/extras.d/70-trove @@ -0,0 +1,33 @@ +# trove.sh - Devstack extras script to install Trove + +if is_service_enabled trove; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/trove + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing Trove" + install_trove + install_troveclient + cleanup_trove + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + echo_summary "Configuring Trove" + configure_troveclient + configure_trove + + if is_service_enabled key; then + create_trove_accounts + fi + + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + # Initialize trove + init_trove + + # Start the trove API and trove taskmgr components + echo_summary "Starting Trove" + start_trove + fi + + if [[ "$1" == "unstack" ]]; then + stop_trove + fi +fi diff --git a/lib/trove b/lib/trove index 8e817f5145..9c91024211 100644 --- a/lib/trove +++ b/lib/trove @@ -38,6 +38,16 @@ else TROVE_BIN_DIR=$(get_python_exec_prefix) fi +# Functions +# --------- + +# Test if any Trove services are enabled +# is_trove_enabled +function is_trove_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"tr-" ]] && return 0 + return 1 +} + # setup_trove_logging() - Adds logging configuration to conf files function setup_trove_logging() { local CONF=$1 diff --git a/stack.sh b/stack.sh index a2469f1868..45d47c819c 100755 --- a/stack.sh +++ b/stack.sh @@ -3,7 +3,7 @@ # ``stack.sh`` is an opinionated OpenStack developer installation. 
It # installs and configures various combinations of **Ceilometer**, **Cinder**, # **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron**, -# **Swift**, and **Trove** +# and **Swift** # This script allows you to specify configuration options of what git # repositories to use, enabled services, network configuration and various @@ -337,7 +337,6 @@ source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap source $TOP_DIR/lib/ironic -source $TOP_DIR/lib/trove # Extras Source # -------------- @@ -739,12 +738,6 @@ if is_service_enabled heat; then configure_heat fi -if is_service_enabled trove; then - install_trove - install_troveclient - cleanup_trove -fi - if is_service_enabled tls-proxy; then configure_CA init_CA @@ -927,10 +920,6 @@ if is_service_enabled key; then create_cinder_accounts create_neutron_accounts - if is_service_enabled trove; then - create_trove_accounts - fi - if is_service_enabled ceilometer; then create_ceilometer_accounts fi @@ -1204,19 +1193,6 @@ if is_service_enabled heat; then start_heat fi -# Configure and launch the trove service api, and taskmanager -if is_service_enabled trove; then - # Initialize trove - echo_summary "Configuring Trove" - configure_troveclient - configure_trove - init_trove - - # Start the trove API and trove taskmgr components - echo_summary "Starting Trove" - start_trove -fi - # Create account rc files # ======================= diff --git a/unstack.sh b/unstack.sh index 31f6f01c8f..92d0642c38 100755 --- a/unstack.sh +++ b/unstack.sh @@ -56,7 +56,6 @@ source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap source $TOP_DIR/lib/ironic -source $TOP_DIR/lib/trove # Extras Source # -------------- @@ -92,9 +91,6 @@ if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then fi # Call service stop -if is_service_enabled trove; then - stop_trove -fi if is_service_enabled heat; then stop_heat From abb7df152328fd83924070c4c40843847fb6d87a Mon Sep 17 00:00:00 2001 From: Sergey 
Lukjanov Date: Tue, 28 Jan 2014 22:38:06 +0400 Subject: [PATCH 0076/4119] Include SAVANNA_CONF_DIR into SAVANNA_CONF_FILE It's the commom way of using X_CONF_FILE variable. Change-Id: Ibc284be44ffdd25be3191913c78424cbf06b2bb0 --- lib/savanna | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/lib/savanna b/lib/savanna index c7d59f79c4..de2044318b 100644 --- a/lib/savanna +++ b/lib/savanna @@ -26,7 +26,7 @@ SAVANNA_BRANCH=${SAVANNA_BRANCH:-master} # Set up default directories SAVANNA_DIR=$DEST/savanna SAVANNA_CONF_DIR=${SAVANNA_CONF_DIR:-/etc/savanna} -SAVANNA_CONF_FILE=savanna.conf +SAVANNA_CONF_FILE=${SAVANNA_CONF_DIR}/savanna.conf SAVANNA_DEBUG=${SAVANNA_DEBUG:-True} SAVANNA_SERVICE_HOST=${SAVANNA_SERVICE_HOST:-$SERVICE_HOST} @@ -88,24 +88,24 @@ function configure_savanna() { sudo chown $STACK_USER $SAVANNA_CONF_DIR # Copy over savanna configuration file and configure common parameters. - cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE + cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_FILE - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_username savanna - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG + iniset $SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD + iniset $SAVANNA_CONF_FILE DEFAULT os_admin_username savanna + iniset $SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME + iniset $SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database connection `database_connection_url savanna` + iniset $SAVANNA_CONF_FILE database connection `database_connection_url savanna` if is_service_enabled neutron; then - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_neutron true - 
iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_floating_ips true + iniset $SAVANNA_CONF_FILE DEFAULT use_neutron true + iniset $SAVANNA_CONF_FILE DEFAULT use_floating_ips true fi - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG + iniset $SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG recreate_database savanna utf8 - $SAVANNA_BIN_DIR/savanna-db-manage --config-file $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE upgrade head + $SAVANNA_BIN_DIR/savanna-db-manage --config-file $SAVANNA_CONF_FILE upgrade head } # install_savanna() - Collect source and prepare @@ -116,7 +116,7 @@ function install_savanna() { # start_savanna() - Start running processes, including screen function start_savanna() { - screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api --config-file $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE" + screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api --config-file $SAVANNA_CONF_FILE" } # stop_savanna() - Stop running processes From 1f76328027bb5cee0b0ea7077f4c59c919f1c4ae Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Tue, 28 Jan 2014 23:01:38 +0100 Subject: [PATCH 0077/4119] Stop all neutron-ns-metadata-proxy with stop_neutron Process name is actually python therefore neutron-ns-metadata-proxy pattern didn't match wanted process. Closes-bug: #1269982 Change-Id: Ib4439b0d32f103253b461841fa903c65763ff280 --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 960f11b154..f9ee484607 100644 --- a/lib/neutron +++ b/lib/neutron @@ -505,7 +505,7 @@ function stop_neutron() { [ ! 
-z "$pid" ] && sudo kill -9 $pid fi if is_service_enabled q-meta; then - sudo pkill -9 neutron-ns-metadata-proxy || : + sudo pkill -9 -f neutron-ns-metadata-proxy || : fi if is_service_enabled q-lbaas; then From 4a0cd374e2911adb33af44fa6643d6323ea523e6 Mon Sep 17 00:00:00 2001 From: shalini khandelwal Date: Wed, 29 Jan 2014 09:48:15 +0000 Subject: [PATCH 0078/4119] Renamed file 70-trove to 70-trove.sh Reason: Devstack not installing trove stack.sh ignores the trove installation script(70-trove) Change-Id: I3f179a6b5ded46e9f96a1c4bcc673ec52fa8bf0e Closes-Bug: #1274022 --- extras.d/{70-trove => 70-trove.sh} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename extras.d/{70-trove => 70-trove.sh} (100%) diff --git a/extras.d/70-trove b/extras.d/70-trove.sh similarity index 100% rename from extras.d/70-trove rename to extras.d/70-trove.sh From f2c1a712e82ac1d347b0fb6526c79471a9ef8d55 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 29 Jan 2014 21:38:14 +0000 Subject: [PATCH 0079/4119] Copy container-sync-realms.conf in /etc/swift We need the new container-sync realms configuration or we will get a nasty harmless error opening file at swift proxy startup. 
Change-Id: If939da305dcb9403c418219032ac6b50b0099bd3 Closes-Bug: 1274295 --- lib/swift | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/swift b/lib/swift index 37b630c3fa..baa03ec5b8 100644 --- a/lib/swift +++ b/lib/swift @@ -258,6 +258,8 @@ function configure_swift() { SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONF_DIR}/proxy-server.conf cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER} + cp ${SWIFT_DIR}/etc/container-sync-realms.conf-sample ${SWIFT_CONF_DIR}/container-sync-realms.conf + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${STACK_USER} From 4237f590b7b93117e59f9f777bc70d212969f61a Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 29 Jan 2014 16:22:11 -0600 Subject: [PATCH 0080/4119] Generate Tempest service list rather than hard-code it The list of services that Tempest used to set its 'service_available' config values was hard-coded. To be plugin-friendly have each service (project) add its name to the TEMPEST_SERVICES variable and use that for setting the 'service_available' values. 
Change-Id: I208efd7fd0798b18ac2e6353ee70b773e84a2683 --- lib/ceilometer | 5 ++++- lib/cinder | 3 +++ lib/glance | 3 +++ lib/heat | 4 ++++ lib/horizon | 3 +++ lib/ironic | 3 +++ lib/marconi | 4 ++++ lib/neutron | 4 ++++ lib/nova | 3 +++ lib/savanna | 4 ++++ lib/swift | 3 +++ lib/tempest | 2 +- lib/trove | 4 ++++ stackrc | 6 ++++++ 14 files changed, 49 insertions(+), 2 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 6f3896f2d4..30bf3aed50 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -52,7 +52,10 @@ CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql} CEILOMETER_SERVICE_PROTOCOL=http CEILOMETER_SERVICE_HOST=$SERVICE_HOST CEILOMETER_SERVICE_PORT=${CEILOMETER_SERVICE_PORT:-8777} -# + +# Tell Tempest this project is present +TEMPEST_SERVICES+=,ceilometer + # Functions # --------- diff --git a/lib/cinder b/lib/cinder index d76a41d4b8..9f70b2a0c9 100644 --- a/lib/cinder +++ b/lib/cinder @@ -79,6 +79,9 @@ VOLUME_BACKING_DEVICE2=${VOLUME_BACKING_DEVICE2:-} VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,cinder + # Functions # --------- diff --git a/lib/glance b/lib/glance index 55d5fb37ec..2d41ea4653 100644 --- a/lib/glance +++ b/lib/glance @@ -52,6 +52,9 @@ fi # Glance connection info. Note the port must be specified. GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,glance + # Functions # --------- diff --git a/lib/heat b/lib/heat index b9b8aa66ca..467619f3c6 100644 --- a/lib/heat +++ b/lib/heat @@ -38,6 +38,10 @@ HEAT_CONF=$HEAT_CONF_DIR/heat.conf HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates +# Tell Tempest this project is present +TEMPEST_SERVICES+=,heat + + # Functions # --------- diff --git a/lib/horizon b/lib/horizon index 5bff712743..c64d8502ba 100644 --- a/lib/horizon +++ b/lib/horizon @@ -31,6 +31,9 @@ HORIZON_DIR=$DEST/horizon # The example file in Horizon repo is used by default. 
HORIZON_SETTINGS=${HORIZON_SETTINGS:-$HORIZON_DIR/openstack_dashboard/local/local_settings.py.example} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,horizon + # Functions # --------- diff --git a/lib/ironic b/lib/ironic index afbc3e09e4..b8838f59fb 100644 --- a/lib/ironic +++ b/lib/ironic @@ -42,6 +42,9 @@ IRONIC_BIN_DIR=$(get_python_exec_prefix) IRONIC_SERVICE_PROTOCOL=http IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST:6385} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,ironic + # Functions # --------- diff --git a/lib/marconi b/lib/marconi index 6b9ffdc0b3..1eaebbdf16 100644 --- a/lib/marconi +++ b/lib/marconi @@ -51,6 +51,10 @@ MARCONI_BRANCH=${MARCONI_BRANCH:-master} MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconiclient.git} MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,marconi + + # Functions # --------- diff --git a/lib/neutron b/lib/neutron index 960f11b154..68dfd4a6a3 100644 --- a/lib/neutron +++ b/lib/neutron @@ -237,6 +237,10 @@ else Q_USE_SECGROUP=False fi +# Tell Tempest this project is present +TEMPEST_SERVICES+=,neutron + + # Functions # --------- diff --git a/lib/nova b/lib/nova index dbaa3f53d9..9db19ed532 100644 --- a/lib/nova +++ b/lib/nova @@ -122,6 +122,9 @@ MULTI_HOST=`trueorfalse False $MULTI_HOST` TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,nova + # Functions # --------- diff --git a/lib/savanna b/lib/savanna index c7d59f79c4..176f290c35 100644 --- a/lib/savanna +++ b/lib/savanna @@ -40,6 +40,10 @@ else SAVANNA_BIN_DIR=$(get_python_exec_prefix) fi +# Tell Tempest this project is present +TEMPEST_SERVICES+=,savanna + + # Functions # --------- diff --git a/lib/swift b/lib/swift index 37b630c3fa..afdf995d2e 100644 --- a/lib/swift +++ b/lib/swift @@ -111,6 +111,9 @@ 
OBJECT_PORT_BASE=${OBJECT_PORT_BASE:-6013} CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6011} ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,swift + # Functions # --------- diff --git a/lib/tempest b/lib/tempest index ef9dfe218b..ee996657c2 100644 --- a/lib/tempest +++ b/lib/tempest @@ -329,7 +329,7 @@ function configure_tempest() { iniset $TEMPEST_CONF network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}" # service_available - for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna trove marconi; do + for service in ${TEMPEST_SERVICES//,/ }; do if is_service_enabled $service ; then iniset $TEMPEST_CONF service_available $service "True" else diff --git a/lib/trove b/lib/trove index 9c91024211..1fd011a530 100644 --- a/lib/trove +++ b/lib/trove @@ -38,6 +38,10 @@ else TROVE_BIN_DIR=$(get_python_exec_prefix) fi +# Tell Tempest this project is present +TEMPEST_SERVICES+=,trove + + # Functions # --------- diff --git a/stackrc b/stackrc index 8a0280ecfa..197b4cfc46 100644 --- a/stackrc +++ b/stackrc @@ -37,6 +37,12 @@ fi # enable_service tempest ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,mysql +# Tell Tempest which services are available. The default is set here as +# Tempest falls late in the configuration sequence. This differs from +# ``ENABLED_SERVICES`` in that the project names are used here rather than +# the service names, i.e.: TEMPEST_SERVICES="key,glance,nova" +TEMPEST_SERVICES="" + # Set the default Nova APIs to enable NOVA_ENABLED_APIS=ec2,osapi_compute,metadata From 6c57fbab26e40af5c5b19b46fb3da39341f34dab Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Wed, 20 Nov 2013 17:00:21 -0800 Subject: [PATCH 0081/4119] Set keystone admin_bind_host to KEYSTONE_SERVICE_HOST On Linux ports 32768-61000 can be used by just about anything needing a socket. 
Keystone's IANA assigned port is 35357. Occasionally something else will be using port 35357 first because Linux allows this. Workaround is to bind to port 127.0.0.1 instead of 0.0.0.0. $KEYSTONE_SERVICE_HOST gets its value from $SERVICE_HOST which is set to 127.0.0.1 in the gate. "Ephemeral (client) ports will *never* be sourced from 0.0.0.0, and are uniquely identified by the full connection five-tuple (proto, src IP, src port, dst IP, dst port) anyway, allowing them to overlap src IP/src port as long as proto/dst IP/dst port are different. Thus it is up to keystone/devstack to bind more appropriately and not use wildcard bind addresses unless explicitly necessary for some reason. For example, in the log output, the URLs are configured with dst IPs of 127.0.0.1 anyway, so binding explicitly to localhost would change nothing, while skirting this particular edge case nicely." ~Evan Callicoat This doesn't fix bug 1253482 it works around it while a better solution is prepared (running keystone behind apache in devstack). 
Co-Authored-By: Joe Gordon Change-Id: I112309661dadf8b753c3311182f82464d9d3595e Related-bug: #1253482 --- lib/keystone | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/keystone b/lib/keystone index 0850fb219e..4f7f68b57f 100644 --- a/lib/keystone +++ b/lib/keystone @@ -178,6 +178,7 @@ function configure_keystone() { # Set the URL advertised in the ``versions`` structure returned by the '/' route iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(public_port)s/" iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(admin_port)s/" + iniset $KEYSTONE_CONF DEFAULT admin_bind_host "$KEYSTONE_SERVICE_HOST" # Register SSL certificates if provided if is_ssl_enabled_service key; then From ec5918f2f6ee54c3384e85866e98b67ef01e1e1e Mon Sep 17 00:00:00 2001 From: Ben Nemec Date: Thu, 30 Jan 2014 16:07:23 +0000 Subject: [PATCH 0082/4119] Retry rabbitmq password change Due to the bug referenced below, on Fedora it is possible for the rabbitmq password change to fail the first time rabbitmq is started. This change adds a retry loop to avoid the problem in devstack. One retry should be enough in most (all?) cases, but this will retry up to ten times just to be safe. Note that just retrying the password change is not enough. The rabbitmq-server service must be restarted as well. 
Change-Id: I403dcd503aa8e74e2ba6312a0decf0d4fd0d8795 bz: https://bugzilla.redhat.com/show_bug.cgi?id=1059028 --- lib/rpc_backend | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index f59c80096f..3651bc0d20 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -139,12 +139,18 @@ function restart_rpc_backend() { if is_service_enabled rabbit; then # Start rabbitmq-server echo_summary "Starting RabbitMQ" - if is_fedora || is_suse; then - # service is not started by default - restart_service rabbitmq-server - fi - # change the rabbit password since the default is "guest" - sudo rabbitmqctl change_password guest $RABBIT_PASSWORD + # NOTE(bnemec): Retry initial rabbitmq configuration to deal with + # the fact that sometimes it fails to start properly. + # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1059028 + for i in `seq 10`; do + if is_fedora || is_suse; then + # service is not started by default + restart_service rabbitmq-server + fi + # change the rabbit password since the default is "guest" + sudo rabbitmqctl change_password guest $RABBIT_PASSWORD && break + [[ $i -eq "10" ]] && die $LINENO "Failed to set rabbitmq password" + done if is_service_enabled n-cell; then # Add partitioned access for the child cell if [ -z `sudo rabbitmqctl list_vhosts | grep child_cell` ]; then From f84eb5ba43ec0d548e59d982ec149a8feaa4d4d0 Mon Sep 17 00:00:00 2001 From: Don Dugger Date: Thu, 30 Jan 2014 09:59:30 -0700 Subject: [PATCH 0083/4119] Add support for Gantt Gantt is the new breakout of the scheduler code from the Nova source tree. These changes allow devstack to install/configure/startup gantt as the scheduler service for openstack. 
Change-Id: Ia2b6001f5ccf2469ee9fdee67564c9a915a13862 --- extras.d/70-gantt.sh | 31 ++++++++++++++ lib/gantt | 96 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 127 insertions(+) create mode 100644 extras.d/70-gantt.sh create mode 100644 lib/gantt diff --git a/extras.d/70-gantt.sh b/extras.d/70-gantt.sh new file mode 100644 index 0000000000..ac1efba748 --- /dev/null +++ b/extras.d/70-gantt.sh @@ -0,0 +1,31 @@ +# gantt.sh - Devstack extras script to install Gantt + +if is_service_enabled n-sch; then + disable_service gantt +fi + +if is_service_enabled gantt; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/gantt + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing Gantt" + install_gantt + cleanup_gantt + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + echo_summary "Configuring Gantt" + configure_gantt + + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + # Initialize gantt + init_gantt + + # Start gantt + echo_summary "Starting Gantt" + start_gantt + fi + + if [[ "$1" == "unstack" ]]; then + stop_gantt + fi +fi diff --git a/lib/gantt b/lib/gantt new file mode 100644 index 0000000000..832d7590df --- /dev/null +++ b/lib/gantt @@ -0,0 +1,96 @@ +# lib/gantt +# Install and start **Gantt** scheduler service + +# Dependencies: +# +# - functions +# - DEST, DATA_DIR, STACK_USER must be defined + +# stack.sh +# --------- +# - install_gantt +# - configure_gantt +# - init_gantt +# - start_gantt +# - stop_gantt +# - cleanup_gantt + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# -------- + +# set up default directories +GANTT_DIR=$DEST/gantt +GANTT_STATE_PATH=${GANTT_STATE_PATH:=$DATA_DIR/gantt} +GANTT_REPO=${GANTT_REPO:-${GIT_BASE}/openstack/gantt.git} +GANTT_BRANCH=${GANTT_BRANCH:-master} + +GANTTCLIENT_DIR=$DEST/python-ganttclient +GANTTCLIENT_REPO=${GANTT_REPO:-${GIT_BASE}/openstack/python-ganttclient.git} 
+GANTTCLIENT_BRANCH=${GANTT_BRANCH:-master} + +# eventually we will have a separate gantt config +# file but for compatibility reasone stick with +# nova.conf for now +GANTT_CONF_DIR=${GANTT_CONF_DIR:-/etc/nova} +GANTT_CONF=$GANTT_CONF_DIR/nova.conf + +# Support entry points installation of console scripts +GANTT_BIN_DIR=$(get_python_exec_prefix) + + +# Functions +# --------- + +# cleanup_gantt() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_gantt() { + echo "Cleanup Gantt" +} + +# configure_gantt() - Set config files, create data dirs, etc +function configure_gantt() { + echo "Configure Gantt" +} + +# init_gantt() - Initialize database and volume group +function init_gantt() { + echo "Initialize Gantt" +} + +# install_gantt() - Collect source and prepare +function install_gantt() { + git_clone $GANTT_REPO $GANTT_DIR $GANTT_BRANCH + setup_develop $GANTT_DIR +} + +# install_ganttclient() - Collect source and prepare +function install_ganttclient() { + echo "Install Gantt Client" +# git_clone $GANTTCLIENT_REPO $GANTTCLIENT_DIR $GANTTCLIENT_BRANCH +# setup_develop $GANTTCLIENT_DIR +} + +# start_gantt() - Start running processes, including screen +function start_gantt() { + if is_service_enabled gantt; then + screen_it gantt "cd $GANTT_DIR && $GANTT_BIN_DIR/gantt-scheduler --config-file $GANTT_CONF" + fi +} + +# stop_gantt() - Stop running processes +function stop_gantt() { + echo "Stop Gantt" + screen_stop gantt +} + +# Restore xtrace +$XTRACE + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: From 2dac885e6c48989d9a7bc89aca2b69503d2b3399 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Fri, 31 Jan 2014 01:25:28 -0500 Subject: [PATCH 0084/4119] Pull docker images from global registry The global docker registry is where images are being built and uploaded. It's effectively docker's version of, say, 'pip'. 
The static tarballs are not only an extra maintenance burden as they're outside the standard build and publishing process, but are presently outside the scope of an open development / release process as well. While this process does cause some trouble with network-independence for CI purposes, the fetching is still done from install-docker.sh; Additionally, this driver is not currently tested via the community CI effort. Change-Id: I3ee6bfee9c273cd3aabe1e00a1d1a8856a466189 --- lib/nova_plugins/hypervisor-docker | 8 ++++---- tools/docker/install_docker.sh | 23 ++++++----------------- 2 files changed, 10 insertions(+), 21 deletions(-) diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index 0153953d6c..bb934b87d6 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -31,10 +31,10 @@ DOCKER_UNIX_SOCKET=/var/run/docker.sock DOCKER_PID_FILE=/var/run/docker.pid DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042} -DOCKER_IMAGE=${DOCKER_IMAGE:-http://get.docker.io/images/openstack/docker-ut.tar.gz} -DOCKER_IMAGE_NAME=docker-busybox -DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-http://get.docker.io/images/openstack/docker-registry.tar.gz} -DOCKER_REGISTRY_IMAGE_NAME=docker-registry +DOCKER_IMAGE=${DOCKER_IMAGE:-busybox:latest} +DOCKER_IMAGE_NAME=busybox +DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-registry:latest} +DOCKER_REGISTRY_IMAGE_NAME=registry DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME} DOCKER_APT_REPO=${DOCKER_APT_REPO:-https://get.docker.io/ubuntu} diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh index 375cfe958b..4fa23864fb 100755 --- a/tools/docker/install_docker.sh +++ b/tools/docker/install_docker.sh @@ -55,21 +55,10 @@ if ! 
timeout $SERVICE_TIMEOUT sh -c "$CONFIGURE_CMD"; then die $LINENO "docker did not start" fi +# Get guest container image +docker pull $DOCKER_IMAGE +docker tag $DOCKER_IMAGE $DOCKER_IMAGE_NAME -# Get Docker image -if [[ ! -r $FILES/docker-ut.tar.gz ]]; then - (cd $FILES; curl -OR $DOCKER_IMAGE) -fi -if [[ ! -r $FILES/docker-ut.tar.gz ]]; then - die $LINENO "Docker image unavailable" -fi -docker import - $DOCKER_IMAGE_NAME <$FILES/docker-ut.tar.gz - -# Get Docker registry image -if [[ ! -r $FILES/docker-registry.tar.gz ]]; then - (cd $FILES; curl -OR $DOCKER_REGISTRY_IMAGE) -fi -if [[ ! -r $FILES/docker-registry.tar.gz ]]; then - die $LINENO "Docker registry image unavailable" -fi -docker import - $DOCKER_REGISTRY_IMAGE_NAME <$FILES/docker-registry.tar.gz +# Get docker-registry image +docker pull $REGISTRY_IMAGE +docker tag $REGISTRY_IMAGE $REGISTRY_IMAGE_NAME From 19a3814b9a3afc24a77c5c301622661f388475d5 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 30 Jan 2014 15:49:53 +0100 Subject: [PATCH 0085/4119] glance: stop using deprecated notifier_strategy Change-Id: Ic796f0ad57db45bf053312ad10815461528030b3 --- lib/glance | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/glance b/lib/glance index 2d41ea4653..07c4408efc 100644 --- a/lib/glance +++ b/lib/glance @@ -108,10 +108,8 @@ function configure_glance() { iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $GLANCE_API_CONF keystone_authtoken admin_user glance iniset $GLANCE_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - if is_service_enabled qpid; then - iniset $GLANCE_API_CONF DEFAULT notifier_strategy qpid - elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $GLANCE_API_CONF DEFAULT notifier_strategy rabbit + if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then + iniset $GLANCE_API_CONF DEFAULT notification_driver messaging fi iniset_rpc_backend glance $GLANCE_API_CONF 
DEFAULT iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api From 061c14da01bb25ff86e0bfdb5e1bed887cb63997 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 30 Jan 2014 15:51:37 +0100 Subject: [PATCH 0086/4119] ironic: remove notifier_strategy option This has never exited in Ironic, and it does not even uses notification. Change-Id: I4a3d386116561d9a22d650f123df1aae5ed9849e --- lib/ironic | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/ironic b/lib/ironic index b8838f59fb..983add83d1 100644 --- a/lib/ironic +++ b/lib/ironic @@ -105,11 +105,6 @@ function configure_ironic_api() { iniset $IRONIC_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $IRONIC_CONF_FILE keystone_authtoken admin_user ironic iniset $IRONIC_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD - if is_service_enabled qpid; then - iniset $IRONIC_CONF_FILE DEFAULT notifier_strategy qpid - elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $IRONIC_CONF_FILE DEFAULT notifier_strategy rabbit - fi iniset_rpc_backend ironic $IRONIC_CONF_FILE DEFAULT iniset $IRONIC_CONF_FILE keystone_authtoken signing_dir $IRONIC_AUTH_CACHE_DIR/api From 6114a518de8d2db560db193ed4bc26d6e1659ce7 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 31 Jan 2014 08:21:24 -0500 Subject: [PATCH 0087/4119] fix sar reporting in the gate the sar filter made an assumption of time display including an AM/PM... which isn't true in all environments. Hence the blank sysstat screen in the gate runs of late. This fixes that, and displays the first line which includes header version to make sure we are functioning. 
Change-Id: I537e0bf2127efaf337c4792bc23d938145c8990d --- tools/sar_filter.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/sar_filter.py b/tools/sar_filter.py index ed8c19687c..24ef0e476c 100755 --- a/tools/sar_filter.py +++ b/tools/sar_filter.py @@ -25,10 +25,10 @@ def is_data_line(line): def parse_line(line): - m = re.search('(\d\d:\d\d:\d\d \w\w)(\s+((\S+)\s*)+)', line) + m = re.search('(\d\d:\d\d:\d\d( \w\w)?)(\s+((\S+)\s*)+)', line) if m: date = m.group(1) - data = m.group(2).rstrip() + data = m.group(3).rstrip() return date, data else: return None, None @@ -47,6 +47,10 @@ def parse_line(line): data_line = "" printed_header = False current_ts = None + +# print out the first sysstat line regardless +print process.stdout.readline() + while True: nextline = process.stdout.readline() if nextline == '' and process.poll() is not None: From 43d950843769135d32ce316cfb0f72697a879623 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Thu, 30 Jan 2014 17:49:22 -0500 Subject: [PATCH 0088/4119] Install libguestfs for nova-compute on Ubuntu We were already installing this for n-cpu on rpm distros, but not Ubuntu. Install it so that nova-compute can use it for file injection, which is the preferred method over nbd. Set CONF.libvirt.inject_partition to -1. This enables using libguestfs to determine the proper partition to inject into. Don't bother trying to load the nbd kernel module anymore. It won't be used since we know always expect libguestfs to be installed. 
Change-Id: Ifa9d95bf759f1dad8685590a2df242d852dd2cb0 --- files/apts/n-cpu | 2 +- lib/nova | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/files/apts/n-cpu b/files/apts/n-cpu index 29e37603b7..b287107256 100644 --- a/files/apts/n-cpu +++ b/files/apts/n-cpu @@ -1,8 +1,8 @@ # Stuff for diablo volumes -nbd-client lvm2 open-iscsi open-iscsi-utils # Deprecated since quantal dist:precise genisoimage sysfsutils sg3-utils +python-guestfs diff --git a/lib/nova b/lib/nova index 9db19ed532..d5f7514be5 100644 --- a/lib/nova +++ b/lib/nova @@ -240,8 +240,10 @@ function configure_nova() { sudo sysctl -w net.ipv4.ip_forward=1 if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - # Attempt to load modules: network block device - used to manage qcow images - sudo modprobe nbd || true + # When libguestfs is available for file injection, enable using + # libguestfs to inspect the image and figure out the proper + # partition to inject into. + iniset $NOVA_CONF libvirt inject_partition '-1' # Check for kvm (hardware based virtualization). If unable to initialize # kvm, we drop back to the slower emulation mode (qemu). Note: many systems From c4f47345a588b15d83ebc5584c8698843b568a40 Mon Sep 17 00:00:00 2001 From: Jeremy Stanley Date: Sat, 25 Jan 2014 01:10:31 +0000 Subject: [PATCH 0089/4119] Make MySQL query logging optional * lib/databases/mysql: Wrap query log configuration in a check for a ENABLE_QUERY_LOGGING variable. * stackrc: Add the DATABASE_QUERY_LOGGING variable defaulted to True. 
Change-Id: Iddf8538ad0a1e36e2c6944dc70315984026c8245 --- lib/databases/mysql | 33 +++++++++++++++++++-------------- stackrc | 3 +++ 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 0eb8fdd7a2..476b4b91b7 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -87,20 +87,25 @@ function configure_database_mysql { default-storage-engine = InnoDB" $MY_CONF fi - # Turn on slow query log - sudo sed -i '/log.slow.queries/d' $MY_CONF - sudo sed -i -e "/^\[mysqld\]/ a \ -log-slow-queries = /var/log/mysql/mysql-slow.log" $MY_CONF - - # Log all queries (any query taking longer than 0 seconds) - sudo sed -i '/long.query.time/d' $MY_CONF - sudo sed -i -e "/^\[mysqld\]/ a \ -long-query-time = 0" $MY_CONF - - # Log all non-indexed queries - sudo sed -i '/log.queries.not.using.indexes/d' $MY_CONF - sudo sed -i -e "/^\[mysqld\]/ a \ -log-queries-not-using-indexes" $MY_CONF + if [[ "$DATABASE_QUERY_LOGGING" == "True" ]]; then + echo_summary "Enabling MySQL query logging" + + # Turn on slow query log + sudo sed -i '/log.slow.queries/d' $MY_CONF + sudo sed -i -e "/^\[mysqld\]/ a \ + log-slow-queries = /var/log/mysql/mysql-slow.log" $MY_CONF + + # Log all queries (any query taking longer than 0 seconds) + sudo sed -i '/long.query.time/d' $MY_CONF + sudo sed -i -e "/^\[mysqld\]/ a \ + long-query-time = 0" $MY_CONF + + # Log all non-indexed queries + sudo sed -i '/log.queries.not.using.indexes/d' $MY_CONF + sudo sed -i -e "/^\[mysqld\]/ a \ + log-queries-not-using-indexes" $MY_CONF + + fi restart_service $MYSQL } diff --git a/stackrc b/stackrc index 49fb26b2c7..276ce33970 100644 --- a/stackrc +++ b/stackrc @@ -59,6 +59,9 @@ elif [[ -f $RC_DIR/.localrc.auto ]]; then source $RC_DIR/.localrc.auto fi +# This can be used to turn database query logging on and off +# (currently only implemented for MySQL backend) +DATABASE_QUERY_LOGGING=$(trueorfalse True $DATABASE_QUERY_LOGGING) # Repositories # ------------ From 
1272bc5e93f171c8d7193475547c43b9032b5c39 Mon Sep 17 00:00:00 2001 From: Nikhil Manchanda Date: Fri, 31 Jan 2014 15:04:05 -0800 Subject: [PATCH 0090/4119] Pipeline filter is 'authtoken' and not 'tokenauth' The pipeline fileter in the api-paste.ini for the keystone middleware was renamed to 'authtoken'. Trove install is not able to authenticate against keystone unless this is renamed Change-Id: I6f912d29c143b3acbc43da222cf8b4c3fafb2c8d --- lib/trove | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/trove b/lib/trove index 1fd011a530..2000446b13 100644 --- a/lib/trove +++ b/lib/trove @@ -129,14 +129,14 @@ function configure_trove() { # Copy api-paste file over to the trove conf dir and configure it cp $TROVE_LOCAL_CONF_DIR/api-paste.ini $TROVE_CONF_DIR/api-paste.ini TROVE_API_PASTE_INI=$TROVE_CONF_DIR/api-paste.ini - iniset $TROVE_API_PASTE_INI filter:tokenauth auth_host $KEYSTONE_AUTH_HOST - iniset $TROVE_API_PASTE_INI filter:tokenauth auth_port $KEYSTONE_AUTH_PORT - iniset $TROVE_API_PASTE_INI filter:tokenauth auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $TROVE_API_PASTE_INI filter:tokenauth cafile $KEYSTONE_SSL_CA - iniset $TROVE_API_PASTE_INI filter:tokenauth admin_tenant_name $SERVICE_TENANT_NAME - iniset $TROVE_API_PASTE_INI filter:tokenauth admin_user trove - iniset $TROVE_API_PASTE_INI filter:tokenauth admin_password $SERVICE_PASSWORD - iniset $TROVE_API_PASTE_INI filter:tokenauth signing_dir $TROVE_AUTH_CACHE_DIR + iniset $TROVE_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $TROVE_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $TROVE_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $TROVE_API_PASTE_INI filter:authtoken cafile $KEYSTONE_SSL_CA + iniset $TROVE_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $TROVE_API_PASTE_INI filter:authtoken admin_user trove + iniset $TROVE_API_PASTE_INI filter:authtoken 
admin_password $SERVICE_PASSWORD + iniset $TROVE_API_PASTE_INI filter:authtoken signing_dir $TROVE_AUTH_CACHE_DIR # (Re)create trove conf files rm -f $TROVE_CONF_DIR/trove.conf From 7bc783b95b2e115f40a4db8823823573afe7a768 Mon Sep 17 00:00:00 2001 From: Nathan Kinder Date: Fri, 31 Jan 2014 16:54:10 -0800 Subject: [PATCH 0091/4119] LDAP root DN creation fails When keystone is configured to set up an LDAP server to use as it's identity backend, the creation of the root DN fails. The problem is that one of the mods in the modify operation that sets up the root DN is incorrect, which causes the entire modify operation to fail. The incorrect mod is attempting to configure some attribute indexes, but one of the attributes it specifies is undefined. This patch removes the undefined attribute from the template that is used to create the modify operation. Change-Id: I413587130c64ca4f5f467b2ea1c0ab12867999ce Closes-Bug: 1275158 --- files/ldap/manager.ldif.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in index de3b69de7c..2f1f1395ee 100644 --- a/files/ldap/manager.ldif.in +++ b/files/ldap/manager.ldif.in @@ -12,4 +12,4 @@ olcRootPW: ${SLAPPASS} replace: olcDbIndex olcDbIndex: objectClass eq olcDbIndex: default pres,eq -olcDbIndex: cn,sn,givenName,co +olcDbIndex: cn,sn,givenName From 6bf1f1fb332c93cb4b74cf6b6511d2f9818a501d Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Sat, 1 Feb 2014 17:05:18 -0500 Subject: [PATCH 0092/4119] use ext4 for guest default ephemeral this isn't upstream default because of compatibility questions with really old host on providers. However there is no reason not to do it in devstack. 
Change-Id: I6438c0efb297cfa5d3dbb5f00701b24f01c39d14 --- lib/nova_plugins/hypervisor-libvirt | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 6f90f4ac17..42d3af15cf 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -108,6 +108,7 @@ EOF" iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" iniset $NOVA_CONF DEFAULT use_usb_tablet "False" + iniset $NOVA_CONF DEFAULT default_ephemeral_format "ext4" iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver" LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" From 2ed4ae70b820ad3cbd12f2b6c2452ff66005ebaa Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 2 Feb 2014 09:38:05 +0100 Subject: [PATCH 0093/4119] Have ceilometer to respect the keystone settings lib/ceilometer ignored the global settings related to keystone settings. It can cause issues for example when the keystone does not listen on 127.0.0.1 even in single node deployment. 
Change-Id: I6e4654daa2ec624ac11aaf7f49495fcfaa72071d --- lib/ceilometer | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ceilometer b/lib/ceilometer index 30bf3aed50..75c00b6b07 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -137,7 +137,9 @@ function configure_ceilometer() { iniset $CEILOMETER_CONF DEFAULT os_password $SERVICE_PASSWORD iniset $CEILOMETER_CONF DEFAULT os_tenant_name $SERVICE_TENANT_NAME - iniset $CEILOMETER_CONF keystone_authtoken auth_protocol http + iniset $CEILOMETER_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $CEILOMETER_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $CEILOMETER_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL iniset $CEILOMETER_CONF keystone_authtoken admin_user ceilometer iniset $CEILOMETER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME From 85a85f87f814446dd2364eea1b6d976d50500203 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Tue, 21 Jan 2014 11:13:55 +0100 Subject: [PATCH 0094/4119] Use service role with glance service glance just used to admin role for token validation, the service role is sufficient for this. glance also needs an user with enough permission to use swift, so creating a dedictated service user for swift usage when s-proxy is enabled. 
Change-Id: I6df3905e5db35ea3421468ca1ee6d8de3271f8d1 --- files/keystone_data.sh | 24 +++++++++++++++++++----- lib/glance | 2 +- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index d477c42906..9a34c7616f 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -2,12 +2,14 @@ # # Initial data for Keystone using python-keystoneclient # -# Tenant User Roles +# Tenant User Roles # ------------------------------------------------------------------ -# service glance admin -# service heat service # if enabled +# service glance service +# service glance-swift ResellerAdmin +# service heat service # if enabled +# service ceilometer admin # if enabled # Tempest Only: -# alt_demo alt_demo Member +# alt_demo alt_demo Member # # Variables set before calling this script: # SERVICE_TOKEN - aka admin_token in keystone.conf @@ -96,7 +98,19 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then keystone user-role-add \ --tenant $SERVICE_TENANT_NAME \ --user glance \ - --role admin + --role service + # required for swift access + if [[ "$ENABLED_SERVICES" =~ "s-proxy" ]]; then + keystone user-create \ + --name=glance-swift \ + --pass="$SERVICE_PASSWORD" \ + --tenant $SERVICE_TENANT_NAME \ + --email=glance-swift@example.com + keystone user-role-add \ + --tenant $SERVICE_TENANT_NAME \ + --user glance-swift \ + --role ResellerAdmin + fi if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then keystone service-create \ --name=glance \ diff --git a/lib/glance b/lib/glance index 2d41ea4653..00f499a0b9 100644 --- a/lib/glance +++ b/lib/glance @@ -124,7 +124,7 @@ function configure_glance() { if is_service_enabled s-proxy; then iniset $GLANCE_API_CONF DEFAULT default_store swift iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ - iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance + iniset $GLANCE_API_CONF DEFAULT 
swift_store_user $SERVICE_TENANT_NAME:glance-swift iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True From 8664ca53f80849553043aba9663f7cb72a9cec42 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 2 Feb 2014 10:07:39 +0100 Subject: [PATCH 0095/4119] bash_completion for heat and ceilometer Installing bash completion for heat and ceilometer by using a similar way used with other services. Change-Id: I5094648272f2666f6bff181bfa3aeb35e863bd97 --- lib/ceilometer | 1 + lib/heat | 1 + 2 files changed, 2 insertions(+) diff --git a/lib/ceilometer b/lib/ceilometer index 30bf3aed50..6a72459d41 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -105,6 +105,7 @@ function cleanup_ceilometer() { # configure_ceilometerclient() - Set config files, create data dirs, etc function configure_ceilometerclient() { setup_develop $CEILOMETERCLIENT_DIR + sudo install -D -m 0644 -o $STACK_USER {$CEILOMETERCLIENT_DIR/tools/,/etc/bash_completion.d/}ceilometer.bash_completion } # configure_ceilometer() - Set config files, create data dirs, etc diff --git a/lib/heat b/lib/heat index 467619f3c6..f171cb450c 100644 --- a/lib/heat +++ b/lib/heat @@ -157,6 +157,7 @@ function create_heat_cache_dir() { function install_heatclient() { git_clone $HEATCLIENT_REPO $HEATCLIENT_DIR $HEATCLIENT_BRANCH setup_develop $HEATCLIENT_DIR + sudo install -D -m 0644 -o $STACK_USER {$HEATCLIENT_DIR/tools/,/etc/bash_completion.d/}heat.bash_completion } # install_heat() - Collect source and prepare From 0af8122834917b4e44ee0cfae22eb5f93472f1a6 Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Sun, 2 Feb 2014 09:59:07 +1300 Subject: [PATCH 0096/4119] Disable file injection for libvirt driver Change-Id: I73289195d3bb455f4076fadd2eadd6036b04b722 --- lib/nova | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/lib/nova b/lib/nova index d5f7514be5..0db242a34a 100644 --- a/lib/nova +++ b/lib/nova @@ -240,10 
+240,9 @@ function configure_nova() { sudo sysctl -w net.ipv4.ip_forward=1 if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - # When libguestfs is available for file injection, enable using - # libguestfs to inspect the image and figure out the proper - # partition to inject into. - iniset $NOVA_CONF libvirt inject_partition '-1' + # File injection is being disabled by default in the near future - + # disable it here for now to avoid surprises later. + iniset $NOVA_CONF libvirt inject_partition '-2' # Check for kvm (hardware based virtualization). If unable to initialize # kvm, we drop back to the slower emulation mode (qemu). Note: many systems From 0d4bd7e6104bee974a544422456d731eb664805c Mon Sep 17 00:00:00 2001 From: Anita Kuno Date: Sun, 2 Feb 2014 14:59:39 -0600 Subject: [PATCH 0097/4119] Silence commands to echo copyright notices This patch silences commands that echoed copyright notices to the devstack logs. The copyright notices are moved to the top of the file as comments. Change-Id: I8d474a366af2954c168ba8d07329392f56e8e75a --- exercises/neutron-adv-test.sh | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index 1343f11553..a9199e62a6 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -1,6 +1,11 @@ #!/usr/bin/env bash # - +# Copyright 2012, Cisco Systems +# Copyright 2012, VMware, Inc. +# Copyright 2012, NTT MCL, Inc. +# +# Please direct any questions to dedutta@cisco.com, dwendlandt@vmware.com, nachi@nttmcl.com +# # **neutron-adv-test.sh** # Perform integration testing of Nova and other components with Neutron. @@ -406,14 +411,6 @@ usage() { main() { echo Description - echo - echo Copyright 2012, Cisco Systems - echo Copyright 2012, VMware, Inc. - echo Copyright 2012, NTT MCL, Inc. 
- echo - echo Please direct any questions to dedutta@cisco.com, dwendlandt@vmware.com, nachi@nttmcl.com - echo - if [ $# -eq 0 ] ; then # if no args are provided, run all tests From c643ebb26dac484e56aea7b5f30d97fe7711f6f3 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Sun, 2 Feb 2014 09:16:20 +0000 Subject: [PATCH 0098/4119] XenAPI: Fix new useage of trueorfalse * Ensure that Xen setup scripts will continue to function when unset variables are used in stackrc * Ensure that the generic functions are sourced in all places that xenrc (which sources stackrc) is sourced. Change-Id: I54eba20733c2e149621b74a1387f0bef14fca12e --- tools/xen/build_xva.sh | 10 ++++++++++ tools/xen/prepare_guest_template.sh | 10 ++++++++++ tools/xen/xenrc | 3 +++ 3 files changed, 23 insertions(+) diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index 958102b29c..fbbfd6fbe5 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -21,9 +21,19 @@ set -o xtrace # This directory TOP_DIR=$(cd $(dirname "$0") && pwd) +# Source lower level functions +. $TOP_DIR/../../functions + # Include onexit commands . $TOP_DIR/scripts/on_exit.sh +# xapi functions +. $TOP_DIR/functions + +# Determine what system we are running on. +# Might not be XenServer if we're using xenserver-core +GetDistro + # Source params - override xenrc params in your localrc to suite your taste source xenrc diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh index 546ac99cd9..4fa70d377d 100755 --- a/tools/xen/prepare_guest_template.sh +++ b/tools/xen/prepare_guest_template.sh @@ -22,9 +22,19 @@ set -o xtrace # This directory TOP_DIR=$(cd $(dirname "$0") && pwd) +# Source lower level functions +. $TOP_DIR/../../functions + # Include onexit commands . $TOP_DIR/scripts/on_exit.sh +# xapi functions +. $TOP_DIR/functions + +# Determine what system we are running on. 
+# Might not be XenServer if we're using xenserver-core +GetDistro + # Source params - override xenrc params in your localrc to suite your taste source xenrc diff --git a/tools/xen/xenrc b/tools/xen/xenrc index cd282341cb..96f3734a1d 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -91,4 +91,7 @@ UBUNTU_INST_GATEWAY="" # Set the size to 0 to avoid creation of additional disk. XEN_XVDB_SIZE_GB=0 +restore_nounset=`set +o | grep nounset` +set +u source ../../stackrc +$restore_nounset From ca920576cb9c36b7d26a3ce523c9d9a25b3f5db8 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 3 Feb 2014 15:26:20 +0100 Subject: [PATCH 0099/4119] nova: use the correct notification driver Nova now uses oslo.messaging and not the Oslo RPC code anymore, therefore the new driver should be used instead. Change-Id: I3533975ad38ff99bee6cfaa5332843444650f61f --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index d5f7514be5..722b994896 100644 --- a/lib/nova +++ b/lib/nova @@ -447,7 +447,7 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT instance_usage_audit "True" iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour" iniset $NOVA_CONF DEFAULT notify_on_state_change "vm_and_task_state" - iniset $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier" + iniset $NOVA_CONF DEFAULT notification_driver "messaging" fi # Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS`` From a03607d03f18fbe842bb61a509a868e1447fc379 Mon Sep 17 00:00:00 2001 From: Ivar Lazzaro Date: Mon, 3 Feb 2014 06:28:14 -0800 Subject: [PATCH 0100/4119] Embrane Plugin Support Implements blueprint embrane-plugin-support This commit implements Embrane's Neutron plugin installation support in Devstack. 
This is an extension of the openvswitch installation module, which is used by the main plugin, and enables configuration by localrc Change-Id: Ia4824f8d2300bcdce170d226145bbce6088f1557 --- lib/neutron_plugins/embrane | 40 +++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 lib/neutron_plugins/embrane diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane new file mode 100644 index 0000000000..4206a2053c --- /dev/null +++ b/lib/neutron_plugins/embrane @@ -0,0 +1,40 @@ +# Neutron Embrane plugin +# --------------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/neutron_plugins/openvswitch + +save_function() { + local ORIG_FUNC=$(declare -f $1) + local NEW_FUNC="$2${ORIG_FUNC#$1}" + eval "$NEW_FUNC" +} + +save_function neutron_plugin_configure_service _neutron_plugin_configure_service + +function neutron_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/embrane + Q_PLUGIN_CONF_FILENAME=heleos_conf.ini + Q_DB_NAME="ovs_neutron" + Q_PLUGIN_CLASS="neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin" +} + +function neutron_plugin_configure_service() { + _neutron_plugin_configure_service + iniset /$Q_PLUGIN_CONF_FILE heleos esm_mgmt $HELEOS_ESM_MGMT + iniset /$Q_PLUGIN_CONF_FILE heleos admin_username $HELEOS_ADMIN_USERNAME + iniset /$Q_PLUGIN_CONF_FILE heleos admin_password $HELEOS_ADMIN_PASSWORD + iniset /$Q_PLUGIN_CONF_FILE heleos router_image $HELEOS_ROUTER_IMAGE + iniset /$Q_PLUGIN_CONF_FILE heleos mgmt_id $HELEOS_MGMT_ID + iniset /$Q_PLUGIN_CONF_FILE heleos inband_id $HELEOS_INBAND_ID + iniset /$Q_PLUGIN_CONF_FILE heleos oob_id $HELEOS_OOB_ID + iniset /$Q_PLUGIN_CONF_FILE heleos dummy_utif_id $HELEOS_DUMMY_UTIF_ID + iniset /$Q_PLUGIN_CONF_FILE heleos resource_pool_id $HELEOS_RESOURCE_POOL_ID + iniset /$Q_PLUGIN_CONF_FILE heleos async_requests $HELEOS_ASYNC_REQUESTS +} + +# Restore xtrace +$MY_XTRACE \ No newline at end of 
file From 0656e12d6819f6dee671dd6200b2d0895e716c2c Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 3 Feb 2014 08:49:30 +0900 Subject: [PATCH 0101/4119] add ability to ignore rules in bash8 Change-Id: Ia6472f4bb251bf3e9846e08e30b2f9ea30ea1c03 --- tools/bash8.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/tools/bash8.py b/tools/bash8.py index edf7da4645..2623358182 100755 --- a/tools/bash8.py +++ b/tools/bash8.py @@ -30,8 +30,18 @@ import re import sys - ERRORS = 0 +IGNORE = None + + +def register_ignores(ignores): + global IGNORE + if ignores: + IGNORE='^(' + '|'.join(ignores.split(',')) + ')' + + +def should_ignore(error): + return IGNORE and re.search(IGNORE, error) def print_error(error, line): @@ -97,11 +107,13 @@ def get_options(): description='A bash script style checker') parser.add_argument('files', metavar='file', nargs='+', help='files to scan for errors') + parser.add_argument('-i', '--ignore', help='Rules to ignore') return parser.parse_args() def main(): opts = get_options() + register_ignores(opts.ignore) check_files(opts.files) if ERRORS > 0: From 864902ed01f92a9f587ebf0b582357fe2a9ea086 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Mon, 3 Feb 2014 21:00:39 +0000 Subject: [PATCH 0102/4119] Use github for swift3. swift3 is not on OpenStack infra (yet) use the github url instead. 
Closes-Bug: #1275923 Change-Id: I0cc393f93b65dcf8642b3a35925eb9eba3c2e1eb --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 8a0280ecfa..b138f42546 100644 --- a/stackrc +++ b/stackrc @@ -162,7 +162,7 @@ REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-master} # storage service SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git} SWIFT_BRANCH=${SWIFT_BRANCH:-master} -SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/fujita/swift3.git} +SWIFT3_REPO=${SWIFT3_REPO:-http://github.com/fujita/swift3.git} SWIFT3_BRANCH=${SWIFT3_BRANCH:-master} # python swift client library From 891277fbbdf65427b43f194adaafbbf2a4ac4800 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Mon, 3 Feb 2014 21:07:03 +0000 Subject: [PATCH 0103/4119] s3_token has been moved to keystoneclient. Change-Id: I6ffe756d517d11f323bd0c5d3b877d9a9f739a3b --- lib/swift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/swift b/lib/swift index 37b630c3fa..a182e5adfb 100644 --- a/lib/swift +++ b/lib/swift @@ -336,7 +336,7 @@ function configure_swift() { # NOTE(chmou): s3token middleware is not updated yet to use only # username and password. [filter:s3token] -paste.filter_factory = keystone.middleware.s3_token:filter_factory +paste.filter_factory = keystoneclient.middleware.s3_token:filter_factory auth_port = ${KEYSTONE_AUTH_PORT} auth_host = ${KEYSTONE_AUTH_HOST} auth_protocol = ${KEYSTONE_AUTH_PROTOCOL} From f36a9b2136b4ba56ac2989f7829c55b4eb1c08af Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Mon, 3 Feb 2014 23:44:47 +0100 Subject: [PATCH 0104/4119] No need to loop over with pkill -f I guess four times is better than one but if we need four times to kill swift processes there is something pretty bad with it. 
Change-Id: Id2ea2f4ca60feb9fddc7b3181063760d2044b421 --- lib/swift | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/swift b/lib/swift index 54d6f1c2e6..28ca8a80df 100644 --- a/lib/swift +++ b/lib/swift @@ -657,10 +657,8 @@ function stop_swift() { if type -p swift-init >/dev/null; then swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true fi - for type in proxy object container account; do - # Dump all of the servers - pkill -f swift- - done + # Dump all of the servers + pkill -f swift- } # Restore xtrace From 8dad4bde886ed2a5bb28d8eb43cfa874ee81c790 Mon Sep 17 00:00:00 2001 From: Arnaud Legendre Date: Mon, 3 Feb 2014 17:57:39 -0800 Subject: [PATCH 0105/4119] upload_image.sh to support streamOptimized disks The current version of the script will use "preallocated" as the disk type of a stream optimized disk. This needs to be fixed by introspecting the createType of the vmdk file. Closes-Bug: #1275993 Change-Id: I98594acecf26dd1164870f43890254a19ef23fe9 --- functions | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/functions b/functions index 73d65ce15b..281b6767c5 100644 --- a/functions +++ b/functions @@ -1450,7 +1450,7 @@ function upload_image() { # vmdk disk type vmdk_create_type="$(head -25 $IMAGE | grep -a -F -m 1 'createType=' $IMAGE)" vmdk_create_type="${vmdk_create_type#*\"}" - vmdk_create_type="${vmdk_create_type%?}" + vmdk_create_type="${vmdk_create_type%\"*}" descriptor_data_pair_msg="Monolithic flat and VMFS disks "` `"should use a descriptor-data pair." 
@@ -1495,6 +1495,8 @@ function upload_image() { IMAGE_NAME="${flat_fname}" fi vmdk_disktype="preallocated" + elif [[ "$vmdk_create_type" = "streamOptimized" ]]; then + vmdk_disktype="streamOptimized" elif [[ -z "$vmdk_create_type" ]]; then # *-flat.vmdk provided: attempt to retrieve the descriptor (*.vmdk) # to retrieve appropriate metadata @@ -1533,10 +1535,8 @@ function upload_image() { vmdk_adapter_type="${vmdk_adapter_type%?}" fi fi - #TODO(alegendre): handle streamOptimized once supported by the VMware driver. vmdk_disktype="preallocated" else - #TODO(alegendre): handle streamOptimized once supported by the VMware driver. vmdk_disktype="preallocated" fi From d70ba82b14b0c47fd87a957e9f2ca5ddda69948b Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Tue, 4 Feb 2014 14:33:27 +1300 Subject: [PATCH 0106/4119] Move file injection setting to the right place The nova code was wiping nova.conf after our iniset :(. Change-Id: Ib618da1bd21da09f8855ec4691bff79c4c3b3d9c --- lib/nova | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/nova b/lib/nova index 0db242a34a..dbc5c3db44 100644 --- a/lib/nova +++ b/lib/nova @@ -240,10 +240,6 @@ function configure_nova() { sudo sysctl -w net.ipv4.ip_forward=1 if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - # File injection is being disabled by default in the near future - - # disable it here for now to avoid surprises later. - iniset $NOVA_CONF libvirt inject_partition '-2' - # Check for kvm (hardware based virtualization). If unable to initialize # kvm, we drop back to the slower emulation mode (qemu). Note: many systems # come with hardware virtualization disabled in BIOS. 
@@ -499,6 +495,12 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" iniset_rpc_backend nova $NOVA_CONF DEFAULT iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" + + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + # File injection is being disabled by default in the near future - + # disable it here for now to avoid surprises later. + iniset $NOVA_CONF libvirt inject_partition '-2' + fi } function init_nova_cells() { From b408dd2072462f47ae294b601039c12136034e5e Mon Sep 17 00:00:00 2001 From: Denis Makogon Date: Tue, 4 Feb 2014 12:58:59 +0200 Subject: [PATCH 0107/4119] Remove unneeded guest conf values Reasons: - guest service doesn't depend on "sql_connection" value any more; - "exchange_control" already set in trove-guestagent.conf.sample to "trove"; Change-Id: Ifbdb21ac4639d86cf7775634f5b31cfb9739b49f Closes-Bug: #1256046 --- lib/trove | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/trove b/lib/trove index 2000446b13..bb4549121d 100644 --- a/lib/trove +++ b/lib/trove @@ -148,8 +148,6 @@ function configure_trove() { iniset $TROVE_CONF_DIR/trove.conf DEFAULT add_addresses True iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT sql_connection `database_connection_url trove` - iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT control_exchange trove sed -i "s/localhost/$NETWORK_GATEWAY/g" $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample setup_trove_logging $TROVE_CONF_DIR/trove.conf From db1c3847752c84a9fc06186a3352f02b76c1aa7c Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Tue, 4 Feb 2014 20:58:00 +0000 Subject: [PATCH 0108/4119] Fix config group for cinder multi_backend This commit just updates the config group for the multi_backend option. 
Tempest change d5c9602b created a volume-feature-enabled group and moved this volume there but devstack was never updated with the change. Closes-Bug: #1276326 Change-Id: Icf2e96783feec4edbd4d477f8492651cd9bb3f01 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 06183b107b..6fa35d19fd 100644 --- a/lib/tempest +++ b/lib/tempest @@ -323,7 +323,7 @@ function configure_tempest() { fi CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) if [ $CINDER_MULTI_LVM_BACKEND == "True" ]; then - iniset $TEMPEST_CONFIG volume multi_backend_enabled "True" + iniset $TEMPEST_CONFIG volume-feature-enabled multi_backend "True" iniset $TEMPEST_CONFIG volume backend1_name "LVM_iSCSI" iniset $TEMPEST_CONFIG volume backend2_name "LVM_iSCSI_2" fi From 41e36d6bcd3ab04cd3955aef68162c3266dc958e Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Tue, 4 Feb 2014 13:39:32 -0800 Subject: [PATCH 0109/4119] Replace NvpPluginV2 with NsxPlugin The king is dead, long live the king! 
Partial-implements blueprint: nicira-plugin-renaming Change-Id: I9b71479a8d4228d45a6591b169c489c0107fb04c --- lib/neutron_plugins/vmware_nsx | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/neutron_plugins/vmware_nsx b/lib/neutron_plugins/vmware_nsx index d506cb6f8d..74f98df577 100644 --- a/lib/neutron_plugins/vmware_nsx +++ b/lib/neutron_plugins/vmware_nsx @@ -41,8 +41,7 @@ function neutron_plugin_configure_common() { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware Q_PLUGIN_CONF_FILENAME=nsx.ini Q_DB_NAME="neutron_nsx" - # TODO(armando-migliaccio): rename this once the code rename is complete - Q_PLUGIN_CLASS="neutron.plugins.nicira.NeutronPlugin.NvpPluginV2" + Q_PLUGIN_CLASS="neutron.plugins.vmware.plugin.NsxPlugin" } function neutron_plugin_configure_debug_command() { From 1023ff7c3ac184da00b6306f361f285301849881 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 27 Jan 2014 14:56:44 -0600 Subject: [PATCH 0110/4119] Move ironic to plugin Also adds an is_ironic_enabled() function to prepare for an upcoming change in is_service_enabled(). 
Change-Id: I6e6e0e8b70221e231785ab27e9b5d4836933ac4c --- extras.d/50-ironic.sh | 33 +++++++++++++++++++++++++++++++++ lib/ironic | 7 +++++++ stack.sh | 21 --------------------- unstack.sh | 7 ------- 4 files changed, 40 insertions(+), 28 deletions(-) create mode 100644 extras.d/50-ironic.sh diff --git a/extras.d/50-ironic.sh b/extras.d/50-ironic.sh new file mode 100644 index 0000000000..f68a14680f --- /dev/null +++ b/extras.d/50-ironic.sh @@ -0,0 +1,33 @@ +# ironic.sh - Devstack extras script to install ironic + +if is_service_enabled ir-api ir-cond; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/ironic + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing Ironic" + install_ironic + install_ironicclient + cleanup_ironic + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + echo_summary "Configuring Ironic" + configure_ironic + + if is_service_enabled key; then + create_ironic_accounts + fi + + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + # Initialize ironic + init_ironic + + # Start the ironic API and ironic taskmgr components + echo_summary "Starting Ironic" + start_ironic + fi + + if [[ "$1" == "unstack" ]]; then + stop_ironic + cleanup_ironic + fi +fi diff --git a/lib/ironic b/lib/ironic index afbc3e09e4..afb7c23d2c 100644 --- a/lib/ironic +++ b/lib/ironic @@ -46,6 +46,13 @@ IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST:6385} # Functions # --------- +# Test if any Ironic services are enabled +# is_ironic_enabled +function is_ironic_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"ir-" ]] && return 0 + return 1 +} + # install_ironic() - Collect source and prepare function install_ironic() { git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH diff --git a/stack.sh b/stack.sh index 45d47c819c..a1cf595cf0 100755 --- a/stack.sh +++ b/stack.sh @@ -336,7 +336,6 @@ source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap -source $TOP_DIR/lib/ironic 
# Extras Source # -------------- @@ -746,11 +745,6 @@ if is_service_enabled tls-proxy; then # don't be naive and add to existing line! fi -if is_service_enabled ir-api ir-cond; then - install_ironic - install_ironicclient - configure_ironic -fi # Extras Install # -------------- @@ -966,15 +960,6 @@ if is_service_enabled g-reg; then fi -# Ironic -# ------ - -if is_service_enabled ir-api ir-cond; then - echo_summary "Configuring Ironic" - init_ironic -fi - - # Neutron # ------- @@ -1101,12 +1086,6 @@ if is_service_enabled g-api g-reg; then start_glance fi -# Launch the Ironic services -if is_service_enabled ir-api ir-cond; then - echo_summary "Starting Ironic" - start_ironic -fi - # Create an access key and secret key for nova ec2 register image if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1) diff --git a/unstack.sh b/unstack.sh index 92d0642c38..ea9c27d99b 100755 --- a/unstack.sh +++ b/unstack.sh @@ -55,7 +55,6 @@ source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap -source $TOP_DIR/lib/ironic # Extras Source # -------------- @@ -118,12 +117,6 @@ if is_service_enabled s-proxy; then cleanup_swift fi -# Ironic runs daemons -if is_service_enabled ir-api ir-cond; then - stop_ironic - cleanup_ironic -fi - # Apache has the WSGI processes if is_service_enabled horizon; then stop_horizon From 75dbd9b1a3d6fa7d72b95d72a3102d8fdc76fd34 Mon Sep 17 00:00:00 2001 From: "Walter A. 
Boring IV" Date: Tue, 4 Feb 2014 14:56:15 -0800 Subject: [PATCH 0111/4119] Added the import of lib/infra This fixes an error in the devstack/functions setup_develop call, which tries to cd to $REQUIREMENTS_DIR, which is created in lib/infra Change-Id: Ie65d2ba83547acc4ea36d1191e6e90dc21da1fa7 Closes-Bug: #1276365 --- driver_certs/cinder_driver_cert.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh index edcc6d4800..0221e3779c 100755 --- a/driver_certs/cinder_driver_cert.sh +++ b/driver_certs/cinder_driver_cert.sh @@ -24,6 +24,7 @@ TOP_DIR=$(cd $CERT_DIR/..; pwd) source $TOP_DIR/functions source $TOP_DIR/stackrc source $TOP_DIR/openrc +source $TOP_DIR/lib/infra source $TOP_DIR/lib/tempest source $TOP_DIR/lib/cinder From 16dd8b3ed94d5cd217d22a26c18dca52bfca115e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 3 Feb 2014 09:10:54 +0900 Subject: [PATCH 0112/4119] introduce if/then & for/do rules we mostly have a consistent style on if/then & for/do in devstack, except when we don't. This attempts to build a set of rules to enforce this. Because there are times when lines are legitimately long, and there is a continuation, this starts off ignoring if and for loops with continuations. But for short versions, we should enforce this. Changes to make devstack pass are included. The fact that the cleanup patch was so small is pretty solid reason that this is actually the style we've all agreed to. Part of a git stash from hong kong that I finally cleaned up. 
Change-Id: I6376d7afd59cc5ebba9ed69e5ee784a3d5934a10 --- lib/baremetal | 3 +-- lib/heat | 3 +-- lib/neutron_plugins/bigswitch_floodlight | 6 ++--- lib/neutron_plugins/nec | 3 +-- lib/neutron_thirdparty/bigswitch_floodlight | 3 +-- stack.sh | 4 +-- tests/functions.sh | 12 +++------ tools/bash8.py | 29 +++++++++++++++++++++ tools/xen/install_os_domU.sh | 3 +-- tools/xen/scripts/install-os-vpx.sh | 9 +++---- tools/xen/scripts/on_exit.sh | 6 ++--- tools/xen/test_functions.sh | 6 ++--- 12 files changed, 49 insertions(+), 38 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index a0df85e700..d8cd7e936c 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -431,8 +431,7 @@ function upload_baremetal_image() { function clear_baremetal_of_all_nodes() { list=$(nova baremetal-node-list | awk -F '| ' 'NR>3 {print $2}' ) - for node in $list - do + for node in $list; do nova baremetal-node-delete $node done } diff --git a/lib/heat b/lib/heat index f171cb450c..9f5dd8b588 100644 --- a/lib/heat +++ b/lib/heat @@ -186,8 +186,7 @@ function disk_image_create { local elements=$2 local arch=$3 local output=$TOP_DIR/files/$4 - if [[ -f "$output.qcow2" ]]; - then + if [[ -f "$output.qcow2" ]]; then echo "Image file already exists: $output_file" else ELEMENTS_PATH=$elements_path disk-image-create \ diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index 93ec497bb9..1e4aa00121 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -44,16 +44,14 @@ function neutron_plugin_configure_plugin_agent() { function neutron_plugin_configure_service() { iniset /$Q_PLUGIN_CONF_FILE restproxy servers $BS_FL_CONTROLLERS_PORT iniset /$Q_PLUGIN_CONF_FILE restproxy servertimeout $BS_FL_CONTROLLER_TIMEOUT - if [ "$BS_FL_VIF_DRIVER" = "ivs" ] - then + if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then iniset /$Q_PLUGIN_CONF_FILE nova vif_type ivs fi } function neutron_plugin_setup_interface_driver() { local conf_file=$1 - 
if [ "$BS_FL_VIF_DRIVER" = "ivs" ] - then + if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.IVSInterfaceDriver else iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec index d8d8b7ce7e..1cb2fef533 100644 --- a/lib/neutron_plugins/nec +++ b/lib/neutron_plugins/nec @@ -106,8 +106,7 @@ function _neutron_setup_ovs_tunnels() { local id=0 GRE_LOCAL_IP=${GRE_LOCAL_IP:-$HOST_IP} if [ -n "$GRE_REMOTE_IPS" ]; then - for ip in ${GRE_REMOTE_IPS//:/ } - do + for ip in ${GRE_REMOTE_IPS//:/ }; do if [[ "$ip" == "$GRE_LOCAL_IP" ]]; then continue fi diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight index 1fd4fd801a..24c10443b7 100644 --- a/lib/neutron_thirdparty/bigswitch_floodlight +++ b/lib/neutron_thirdparty/bigswitch_floodlight @@ -24,8 +24,7 @@ function init_bigswitch_floodlight() { sudo ovs-vsctl --no-wait br-set-external-id ${OVS_BRIDGE} bridge-id ${OVS_BRIDGE} ctrls= - for ctrl in `echo ${BS_FL_CONTROLLERS_PORT} | tr ',' ' '` - do + for ctrl in `echo ${BS_FL_CONTROLLERS_PORT} | tr ',' ' '`; do ctrl=${ctrl%:*} ctrls="${ctrls} tcp:${ctrl}:${BS_FL_OF_PORT}" done diff --git a/stack.sh b/stack.sh index 45d47c819c..15e14303cf 100755 --- a/stack.sh +++ b/stack.sh @@ -1124,8 +1124,8 @@ fi # Create a randomized default value for the keymgr's fixed_key if is_service_enabled nova; then FIXED_KEY="" - for i in $(seq 1 64); - do FIXED_KEY+=$(echo "obase=16; $(($RANDOM % 16))" | bc); + for i in $(seq 1 64); do + FIXED_KEY+=$(echo "obase=16; $(($RANDOM % 16))" | bc); done; iniset $NOVA_CONF keymgr fixed_key "$FIXED_KEY" fi diff --git a/tests/functions.sh b/tests/functions.sh index 95dafe1028..06a4134abf 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -49,8 +49,7 @@ function test_enable_service() { ENABLED_SERVICES="$start" enable_service $add - if [ 
"$ENABLED_SERVICES" = "$finish" ] - then + if [ "$ENABLED_SERVICES" = "$finish" ]; then echo "OK: $start + $add -> $ENABLED_SERVICES" else echo "changing $start to $finish with $add failed: $ENABLED_SERVICES" @@ -76,8 +75,7 @@ function test_disable_service() { ENABLED_SERVICES="$start" disable_service "$del" - if [ "$ENABLED_SERVICES" = "$finish" ] - then + if [ "$ENABLED_SERVICES" = "$finish" ]; then echo "OK: $start - $del -> $ENABLED_SERVICES" else echo "changing $start to $finish with $del failed: $ENABLED_SERVICES" @@ -102,8 +100,7 @@ echo "Testing disable_all_services()" ENABLED_SERVICES=a,b,c disable_all_services -if [[ -z "$ENABLED_SERVICES" ]] -then +if [[ -z "$ENABLED_SERVICES" ]]; then echo "OK" else echo "disabling all services FAILED: $ENABLED_SERVICES" @@ -118,8 +115,7 @@ function test_disable_negated_services() { ENABLED_SERVICES="$start" disable_negated_services - if [ "$ENABLED_SERVICES" = "$finish" ] - then + if [ "$ENABLED_SERVICES" = "$finish" ]; then echo "OK: $start + $add -> $ENABLED_SERVICES" else echo "changing $start to $finish failed: $ENABLED_SERVICES" diff --git a/tools/bash8.py b/tools/bash8.py index 2623358182..9fb51ecc9e 100755 --- a/tools/bash8.py +++ b/tools/bash8.py @@ -21,9 +21,19 @@ # Currently Supported checks # # Errors +# Basic white space errors, for consistent indenting # - E001: check that lines do not end with trailing whitespace # - E002: ensure that indents are only spaces, and not hard tabs # - E003: ensure all indents are a multiple of 4 spaces +# +# Structure errors +# +# A set of rules that help keep things consistent in control blocks. 
+# These are ignored on long lines that have a continuation, because +# unrolling that is kind of "interesting" +# +# - E010: *do* not on the same line as *for* +# - E011: *then* not on the same line as *if* import argparse import fileinput @@ -51,6 +61,23 @@ def print_error(error, line): print(" - %s: L%s" % (fileinput.filename(), fileinput.filelineno())) +def not_continuation(line): + return not re.search('\\\\$', line) + +def check_for_do(line): + if not_continuation(line): + if re.search('^\s*for ', line): + if not re.search(';\s*do(\b|$)', line): + print_error('E010: Do not on same line as for', line) + + +def check_if_then(line): + if not_continuation(line): + if re.search('^\s*if \[', line): + if not re.search(';\s*then(\b|$)', line): + print_error('E011: Then non on same line as if', line) + + def check_no_trailing_whitespace(line): if re.search('[ \t]+$', line): print_error('E001: Trailing Whitespace', line) @@ -100,6 +127,8 @@ def check_files(files): check_no_trailing_whitespace(logical_line) check_indents(logical_line) + check_for_do(logical_line) + check_if_then(logical_line) def get_options(): diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 41b184c6ac..d172c7ba1b 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -194,8 +194,7 @@ function wait_for_VM_to_halt() { while true do state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted) - if [ -n "$state" ] - then + if [ -n "$state" ]; then break else echo -n "." 
diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index 7b0d891493..8412fdc3ca 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -63,8 +63,7 @@ get_params() ;; esac done - if [[ -z $BRIDGE ]] - then + if [[ -z $BRIDGE ]]; then BRIDGE=xenbr0 fi @@ -91,8 +90,7 @@ xe_min() find_network() { result=$(xe_min network-list bridge="$1") - if [ "$result" = "" ] - then + if [ "$result" = "" ]; then result=$(xe_min network-list name-label="$1") fi echo "$result" @@ -121,8 +119,7 @@ destroy_vifs() { local v="$1" IFS=, - for vif in $(xe_min vif-list vm-uuid="$v") - do + for vif in $(xe_min vif-list vm-uuid="$v"); do xe vif-destroy uuid="$vif" done unset IFS diff --git a/tools/xen/scripts/on_exit.sh b/tools/xen/scripts/on_exit.sh index a4db39c225..2441e3d84a 100755 --- a/tools/xen/scripts/on_exit.sh +++ b/tools/xen/scripts/on_exit.sh @@ -7,8 +7,7 @@ declare -a on_exit_hooks on_exit() { - for i in $(seq $((${#on_exit_hooks[*]} - 1)) -1 0) - do + for i in $(seq $((${#on_exit_hooks[*]} - 1)) -1 0); do eval "${on_exit_hooks[$i]}" done } @@ -17,8 +16,7 @@ add_on_exit() { local n=${#on_exit_hooks[*]} on_exit_hooks[$n]="$*" - if [[ $n -eq 0 ]] - then + if [[ $n -eq 0 ]]; then trap on_exit EXIT fi } diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh index 373d996760..838f86a525 100755 --- a/tools/xen/test_functions.sh +++ b/tools/xen/test_functions.sh @@ -227,16 +227,14 @@ function test_get_local_sr_path { } [ "$1" = "run_tests" ] && { - for testname in $($0) - do + for testname in $($0); do echo "$testname" before_each_test ( set -eux $testname ) - if [ "$?" != "0" ] - then + if [ "$?" 
!= "0" ]; then echo "FAIL" exit 1 else From 86a8e9767912ae957cbbf6ea20a08106011a7728 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 4 Feb 2014 15:20:15 +0100 Subject: [PATCH 0113/4119] Add while/until to the for/do rule Like 'for/do' check that the while/until operator are on the same line with the do. Fixes some pep8 error along the way. Change-Id: I440afe60691263365bf35310bf4212d94f30c339 --- tools/bash8.py | 10 +++++++--- tools/create_userrc.sh | 3 +-- tools/xen/install_os_domU.sh | 3 +-- tools/xen/scripts/install-os-vpx.sh | 3 +-- 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/tools/bash8.py b/tools/bash8.py index 9fb51ecc9e..7552e0d642 100755 --- a/tools/bash8.py +++ b/tools/bash8.py @@ -47,7 +47,7 @@ def register_ignores(ignores): global IGNORE if ignores: - IGNORE='^(' + '|'.join(ignores.split(',')) + ')' + IGNORE = '^(' + '|'.join(ignores.split(',')) + ')' def should_ignore(error): @@ -64,11 +64,15 @@ def print_error(error, line): def not_continuation(line): return not re.search('\\\\$', line) + def check_for_do(line): if not_continuation(line): - if re.search('^\s*for ', line): + match = re.match('^\s*(for|while|until)\s', line) + if match: + operator = match.group(1).strip() if not re.search(';\s*do(\b|$)', line): - print_error('E010: Do not on same line as for', line) + print_error('E010: Do not on same line as %s' % operator, + line) def check_if_then(line): diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index 5f4c48660b..e2d855c4df 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -71,8 +71,7 @@ MODE="" ROLE=Member USER_NAME="" USER_PASS="" -while [ $# -gt 0 ] -do +while [ $# -gt 0 ]; do case "$1" in -h|--help) display_help; exit 0 ;; --os-username) export OS_USERNAME=$2; shift ;; diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index d172c7ba1b..d0d81a2d7e 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -191,8 +191,7 @@ function 
wait_for_VM_to_halt() { domid=$(xe vm-list name-label="$GUEST_NAME" params=dom-id minimal=true) port=$(xenstore-read /local/domain/$domid/console/vnc-port) echo "vncviewer -via root@$mgmt_ip localhost:${port:2}" - while true - do + while true; do state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted) if [ -n "$state" ]; then break diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index 8412fdc3ca..b9b65fdce2 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -42,8 +42,7 @@ EOF get_params() { - while getopts "hbn:r:l:t:" OPTION; - do + while getopts "hbn:r:l:t:" OPTION; do case $OPTION in h) usage exit 1 From d15c8a082464695a4e715bab093bf4d876bbc341 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Tue, 4 Feb 2014 12:38:14 +0000 Subject: [PATCH 0114/4119] Move install responsibilities to domU As we are moving forward to test XenAPI in the gate, it is necessary to move dom0 related modifications to be performed from domU. For this purpose, a new user is created, and that user should be used to talk to dom0 from domU. This change creates that user, makes it possible for dom0 to log in to domU with that account, and configure that account to be able to talk down to dom0. Also move several steps to the nova xenserver plugin: - dom0 plugin installation - create kernels and images directory - install console rotate script - configure a cron to execute console rotate script Configuration changes: A new configuration option, DOMZERO_USER has been created, that specifies a user account that is configured to be able to do passwordless ssh to dom0. 
Change-Id: If9de0b297a67b7cdb5de78d8dd0e8b2ca578b601 --- lib/nova_plugins/hypervisor-xenserver | 28 ++++++++++++++ stackrc | 4 ++ tools/xen/functions | 8 ++++ tools/xen/install_os_domU.sh | 51 ++++++++++++++++---------- tools/xen/prepare_guest.sh | 53 +++++++++++++++++++++++++++ tools/xen/prepare_guest_template.sh | 2 +- 6 files changed, 126 insertions(+), 20 deletions(-) diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver index f47994f187..9843261065 100644 --- a/lib/nova_plugins/hypervisor-xenserver +++ b/lib/nova_plugins/hypervisor-xenserver @@ -56,6 +56,34 @@ function configure_nova_hypervisor() { # Need to avoid crash due to new firewall support XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER" + + local dom0_ip + dom0_ip=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3-) + + local ssh_dom0 + ssh_dom0="sudo -u $DOMZERO_USER ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$dom0_ip" + + # install nova plugins to dom0 + tar -czf - -C $NOVA_DIR/plugins/xenserver/xenapi/etc/xapi.d/plugins/ ./ | + $ssh_dom0 'tar -xzf - -C /etc/xapi.d/plugins/ && chmod a+x /etc/xapi.d/plugins/*' + + # install console logrotate script + tar -czf - -C $NOVA_DIR/tools/xenserver/ rotate_xen_guest_logs.sh | + $ssh_dom0 'tar -xzf - -C /root/ && chmod +x /root/rotate_xen_guest_logs.sh && mkdir -p /var/log/xen/guest' + + # Create a cron job that will rotate guest logs + $ssh_dom0 crontab - << CRONTAB +* * * * * /root/rotate_xen_guest_logs.sh +CRONTAB + + # Create directories for kernels and images + { + echo "set -eux" + cat $TOP_DIR/tools/xen/functions + echo "create_directory_for_images" + echo "create_directory_for_kernels" + } | $ssh_dom0 + } # install_nova_hypervisor() - Install external components diff --git a/stackrc b/stackrc index e89d25e4ab..db5b1889af 100644 --- a/stackrc +++ b/stackrc @@ -245,6 +245,10 @@ case 
"$VIRT_DRIVER" in xenserver) # Xen config common to nova and neutron XENAPI_USER=${XENAPI_USER:-"root"} + # This user will be used for dom0 - domU communication + # should be able to log in to dom0 without a password + # will be used to install the plugins + DOMZERO_USER=${DOMZERO_USER:-"domzero"} ;; *) ;; diff --git a/tools/xen/functions b/tools/xen/functions index 97c56bc1af..ab0be84bd2 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -336,3 +336,11 @@ function max_vcpus() { xe vm-param-set uuid=$vm VCPUs-max=$cpu_count xe vm-param-set uuid=$vm VCPUs-at-startup=$cpu_count } + +function get_domid() { + local vm_name_label + + vm_name_label="$1" + + xe vm-list name-label="$vm_name_label" params=dom-id minimal=true +} diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 41b184c6ac..663f09c1b4 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -67,21 +67,6 @@ fi # Install plugins -## Nova plugins -NOVA_ZIPBALL_URL=${NOVA_ZIPBALL_URL:-$(zip_snapshot_location $NOVA_REPO $NOVA_BRANCH)} -EXTRACTED_NOVA=$(extract_remote_zipball "$NOVA_ZIPBALL_URL") -install_xapi_plugins_from "$EXTRACTED_NOVA" - -LOGROT_SCRIPT=$(find "$EXTRACTED_NOVA" -name "rotate_xen_guest_logs.sh" -print) -if [ -n "$LOGROT_SCRIPT" ]; then - mkdir -p "/var/log/xen/guest" - cp "$LOGROT_SCRIPT" /root/consolelogrotate - chmod +x /root/consolelogrotate - echo "* * * * * /root/consolelogrotate" | crontab -fi - -rm -rf "$EXTRACTED_NOVA" - ## Install the netwrap xapi plugin to support agent control of dom0 networking if [[ "$ENABLED_SERVICES" =~ "q-agt" && "$Q_PLUGIN" = "openvswitch" ]]; then NEUTRON_ZIPBALL_URL=${NEUTRON_ZIPBALL_URL:-$(zip_snapshot_location $NEUTRON_REPO $NEUTRON_BRANCH)} @@ -90,9 +75,6 @@ if [[ "$ENABLED_SERVICES" =~ "q-agt" && "$Q_PLUGIN" = "openvswitch" ]]; then rm -rf "$EXTRACTED_NEUTRON" fi -create_directory_for_kernels -create_directory_for_images - # # Configure Networking # @@ -188,7 +170,7 @@ function 
wait_for_VM_to_halt() { set +x echo "Waiting for the VM to halt. Progress in-VM can be checked with vncviewer:" mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.') - domid=$(xe vm-list name-label="$GUEST_NAME" params=dom-id minimal=true) + domid=$(get_domid "$GUEST_NAME") port=$(xenstore-read /local/domain/$domid/console/vnc-port) echo "vncviewer -via root@$mgmt_ip localhost:${port:2}" while true @@ -361,6 +343,37 @@ else fi fi +# Create an ssh-keypair, and set it up for dom0 user +rm -f /root/dom0key /root/dom0key.pub +ssh-keygen -f /root/dom0key -P "" -C "dom0" +DOMID=$(get_domid "$GUEST_NAME") + +xenstore-write /local/domain/$DOMID/authorized_keys/$DOMZERO_USER "$(cat /root/dom0key.pub)" +xenstore-chmod -u /local/domain/$DOMID/authorized_keys/$DOMZERO_USER r$DOMID + +function run_on_appliance() { + ssh \ + -i /root/dom0key \ + -o UserKnownHostsFile=/dev/null \ + -o StrictHostKeyChecking=no \ + -o BatchMode=yes \ + "$DOMZERO_USER@$OS_VM_MANAGEMENT_ADDRESS" "$@" +} + +# Wait until we can log in to the appliance +while ! 
run_on_appliance true; do + sleep 1 +done + +# Remove authenticated_keys updater cronjob +echo "" | run_on_appliance crontab - + +# Generate a passwordless ssh key for domzero user +echo "ssh-keygen -f /home/$DOMZERO_USER/.ssh/id_rsa -C $DOMZERO_USER@appliance -N \"\" -q" | run_on_appliance + +# Authenticate that user to dom0 +run_on_appliance cat /home/$DOMZERO_USER/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys + # If we have copied our ssh credentials, use ssh to monitor while the installation runs WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1} COPYENV=${COPYENV:-1} diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index 05ac86cf99..094612624b 100755 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -18,6 +18,57 @@ set -o xtrace GUEST_PASSWORD="$1" XS_TOOLS_PATH="$2" STACK_USER="$3" +DOMZERO_USER="$4" + + +function setup_domzero_user() { + local username + + username="$1" + + local key_updater_script + local sudoers_file + key_updater_script="/home/$username/update_authorized_keys.sh" + sudoers_file="/etc/sudoers.d/allow_$username" + + # Create user + adduser --disabled-password --quiet "$username" --gecos "$username" + + # Give passwordless sudo + cat > $sudoers_file << EOF + $username ALL = NOPASSWD: ALL +EOF + chmod 0440 $sudoers_file + + # A script to populate this user's authenticated_keys from xenstore + cat > $key_updater_script << EOF +#!/bin/bash +set -eux + +DOMID=\$(sudo xenstore-read domid) +sudo xenstore-exists /local/domain/\$DOMID/authorized_keys/$username +sudo xenstore-read /local/domain/\$DOMID/authorized_keys/$username > /home/$username/xenstore_value +cat /home/$username/xenstore_value > /home/$username/.ssh/authorized_keys +EOF + + # Give the key updater to the user + chown $username:$username $key_updater_script + chmod 0700 $key_updater_script + + # Setup the .ssh folder + mkdir -p /home/$username/.ssh + chown $username:$username /home/$username/.ssh + chmod 0700 /home/$username/.ssh + touch 
/home/$username/.ssh/authorized_keys + chown $username:$username /home/$username/.ssh/authorized_keys + chmod 0600 /home/$username/.ssh/authorized_keys + + # Setup the key updater as a cron job + crontab -u $username - << EOF +* * * * * $key_updater_script +EOF + +} # Install basics apt-get update @@ -48,6 +99,8 @@ useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd echo $STACK_USER:$GUEST_PASSWORD | chpasswd echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +setup_domzero_user "$DOMZERO_USER" + # Add an udev rule, so that new block devices could be written by stack user cat > /etc/udev/rules.d/50-openstack-blockdev.rules << EOF KERNEL=="xvd[b-z]", GROUP="$STACK_USER", MODE="0660" diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh index 546ac99cd9..a25535dc22 100755 --- a/tools/xen/prepare_guest_template.sh +++ b/tools/xen/prepare_guest_template.sh @@ -76,7 +76,7 @@ cp $STAGING_DIR/etc/rc.local $STAGING_DIR/etc/rc.local.preparebackup cat <$STAGING_DIR/etc/rc.local #!/bin/sh -e bash /opt/stack/prepare_guest.sh \\ - "$GUEST_PASSWORD" "$XS_TOOLS_PATH" "$STACK_USER" \\ + "$GUEST_PASSWORD" "$XS_TOOLS_PATH" "$STACK_USER" "$DOMZERO_USER" \\ > /opt/stack/prepare_guest.log 2>&1 EOF From a7a23addd3634d890a44ff3e44ebefe29a3f7910 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Wed, 5 Feb 2014 15:19:27 -0600 Subject: [PATCH 0115/4119] Update orchestration-related service names in template catalog The orchestration-related service names were not consistent with the other AWS compatibility and native API names, so this change makes them consistent. 
Related-Bug: #1240138 Change-Id: I29a26bc6b0ddab0bff579a900e28da65df097a96 --- files/default_catalog.templates | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/files/default_catalog.templates b/files/default_catalog.templates index e64f68f033..ff00e38e09 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -50,12 +50,12 @@ catalog.RegionOne.image.name = Image Service catalog.RegionOne.cloudformation.publicURL = http://%SERVICE_HOST%:8000/v1 catalog.RegionOne.cloudformation.adminURL = http://%SERVICE_HOST%:8000/v1 catalog.RegionOne.cloudformation.internalURL = http://%SERVICE_HOST%:8000/v1 -catalog.RegionOne.cloudformation.name = Heat CloudFormation Service +catalog.RegionOne.cloudformation.name = CloudFormation service catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s -catalog.RegionOne.orchestration.name = Heat Service +catalog.RegionOne.orchestration.name = Orchestration Service catalog.RegionOne.metering.publicURL = http://%SERVICE_HOST%:8777/v1 catalog.RegionOne.metering.adminURL = http://%SERVICE_HOST%:8777/v1 From d5d4974cb72880799d7ec736237ca01eacb2f6da Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Thu, 6 Feb 2014 16:00:08 +0100 Subject: [PATCH 0116/4119] Cleanup cinder-rootwrap support cinder_rootwrap support in devstack handled a number of now-abandoned use cases: - no $CINDER_DIR/etc/cinder/rootwrap.d (old-style rootwrap) - using oslo-rootwrap instead of cinder-rootwrap (abandoned experiment) This change removes unused code paths and aligns configure_cinder_rootwrap() with configure_nova_rootwrap(). 
Change-Id: I387808dae0e064cc9c894c74ab78e86124f08dd2 --- lib/cinder | 53 +++++++++++++++++++---------------------------------- 1 file changed, 19 insertions(+), 34 deletions(-) diff --git a/lib/cinder b/lib/cinder index 9f70b2a0c9..75e9c97e80 100644 --- a/lib/cinder +++ b/lib/cinder @@ -170,43 +170,28 @@ function cleanup_cinder() { function configure_cinder_rootwrap() { # Set the paths of certain binaries CINDER_ROOTWRAP=$(get_rootwrap_location cinder) - if [[ ! -x $CINDER_ROOTWRAP ]]; then - CINDER_ROOTWRAP=$(get_rootwrap_location oslo) - if [[ ! -x $CINDER_ROOTWRAP ]]; then - die $LINENO "No suitable rootwrap found." - fi - fi - # If Cinder ships the new rootwrap filters files, deploy them - # (owned by root) and add a parameter to $CINDER_ROOTWRAP - ROOTWRAP_CINDER_SUDOER_CMD="$CINDER_ROOTWRAP" - if [[ -d $CINDER_DIR/etc/cinder/rootwrap.d ]]; then - # Wipe any existing rootwrap.d files first - if [[ -d $CINDER_CONF_DIR/rootwrap.d ]]; then - sudo rm -rf $CINDER_CONF_DIR/rootwrap.d - fi - # Deploy filters to /etc/cinder/rootwrap.d - sudo mkdir -m 755 $CINDER_CONF_DIR/rootwrap.d - sudo cp $CINDER_DIR/etc/cinder/rootwrap.d/*.filters $CINDER_CONF_DIR/rootwrap.d - sudo chown -R root:root $CINDER_CONF_DIR/rootwrap.d - sudo chmod 644 $CINDER_CONF_DIR/rootwrap.d/* - # Set up rootwrap.conf, pointing to /etc/cinder/rootwrap.d - if [[ -f $CINDER_DIR/etc/cinder/rootwrap.conf ]]; then - sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/ - else - # rootwrap.conf is no longer shipped in Cinder itself - echo "filters_path=" | sudo tee $CINDER_CONF_DIR/rootwrap.conf > /dev/null - fi - sudo sed -e "s:^filters_path=.*$:filters_path=$CINDER_CONF_DIR/rootwrap.d:" -i $CINDER_CONF_DIR/rootwrap.conf - sudo chown root:root $CINDER_CONF_DIR/rootwrap.conf - sudo chmod 0644 $CINDER_CONF_DIR/rootwrap.conf - # Specify rootwrap.conf as first parameter to rootwrap - CINDER_ROOTWRAP="$CINDER_ROOTWRAP $CINDER_CONF_DIR/rootwrap.conf" - ROOTWRAP_CINDER_SUDOER_CMD="$CINDER_ROOTWRAP *" + 
# Deploy new rootwrap filters files (owned by root). + # Wipe any existing rootwrap.d files first + if [[ -d $CINDER_CONF_DIR/rootwrap.d ]]; then + sudo rm -rf $CINDER_CONF_DIR/rootwrap.d fi - + # Deploy filters to /etc/cinder/rootwrap.d + sudo mkdir -m 755 $CINDER_CONF_DIR/rootwrap.d + sudo cp $CINDER_DIR/etc/cinder/rootwrap.d/*.filters $CINDER_CONF_DIR/rootwrap.d + sudo chown -R root:root $CINDER_CONF_DIR/rootwrap.d + sudo chmod 644 $CINDER_CONF_DIR/rootwrap.d/* + # Set up rootwrap.conf, pointing to /etc/cinder/rootwrap.d + sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/ + sudo sed -e "s:^filters_path=.*$:filters_path=$CINDER_CONF_DIR/rootwrap.d:" -i $CINDER_CONF_DIR/rootwrap.conf + sudo chown root:root $CINDER_CONF_DIR/rootwrap.conf + sudo chmod 0644 $CINDER_CONF_DIR/rootwrap.conf + # Specify rootwrap.conf as first parameter to rootwrap + ROOTWRAP_CSUDOER_CMD="$CINDER_ROOTWRAP $CINDER_CONF_DIR/rootwrap.conf *" + + # Set up the rootwrap sudoers for cinder TEMPFILE=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CINDER_SUDOER_CMD" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CSUDOER_CMD" >$TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap From 6c9430e5679c36ecdc827184cf160297458c4a3c Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Thu, 6 Feb 2014 17:06:00 +0000 Subject: [PATCH 0117/4119] Set uri_v3 in tempest config This properly sets the v3 uri for keystone in the tempest config. Previously tempest would just guess the v3 uri by replacing v2 with v3. However, moving forward this will no longer be the case so devstack should properly set this uri to enable tempest to use the keystone v3 api in addition to the v2. 
Change-Id: Ib02b2e9f24d8ca1f381186c48747ca0fbc45f3f1 --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 06183b107b..1eea9b6bb4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -251,6 +251,7 @@ function configure_tempest() { # Identity iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/" + iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v3/" iniset $TEMPEST_CONFIG identity password "$password" iniset $TEMPEST_CONFIG identity alt_username $ALT_USERNAME iniset $TEMPEST_CONFIG identity alt_password "$password" From e4fa72132228688d2fe74dd974fe04b0fe4c3d6b Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 15 Jan 2014 15:04:49 -0600 Subject: [PATCH 0118/4119] Begin is_service_enabled() cleanup This converts the special cases in the is_service_enabled() function to call individual functions declared by the projects. This allows projects that are not in the DevStack repo and called via the extras.d plugin to handle an equivalent service alias. 
* Ceilometer * Cinder * Glance * Neutron * Nova * Swift TODO: remove the tests from is_service_enabled() after a transition period Patch Set 2: Rebased Change-Id: Ic78be433f93a9dd5f46be548bdbd4c984e0da6e7 --- clean.sh | 2 +- exercises/boot_from_volume.sh | 8 +++----- exercises/euca.sh | 5 ----- exercises/floating_ips.sh | 8 +++----- exercises/volumes.sh | 8 +++----- functions | 10 ++++++++++ lib/ceilometer | 9 ++++++++- lib/cinder | 8 ++++++++ lib/glance | 7 +++++++ lib/neutron | 7 +++++++ lib/nova | 14 ++++++++++++++ lib/swift | 7 +++++++ lib/template | 8 ++++++++ stack.sh | 2 +- stackrc | 2 +- unstack.sh | 2 +- 16 files changed, 82 insertions(+), 25 deletions(-) diff --git a/clean.sh b/clean.sh index e16bdb7f36..09f08dc8c2 100755 --- a/clean.sh +++ b/clean.sh @@ -97,7 +97,7 @@ if is_service_enabled ldap; then fi # Do the hypervisor cleanup until this can be moved back into lib/nova -if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then +if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then cleanup_nova_hypervisor fi diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index ed8ba6310e..79120460b8 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -30,14 +30,12 @@ TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions source $TOP_DIR/functions +# Import project functions +source $TOP_DIR/lib/cinder + # Import configuration source $TOP_DIR/openrc -# Import neutron functions if needed -if is_service_enabled neutron; then - source $TOP_DIR/lib/neutron -fi - # Import exercise configuration source $TOP_DIR/exerciserc diff --git a/exercises/euca.sh b/exercises/euca.sh index 51b2644458..ad852a4f79 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -33,11 +33,6 @@ source $TOP_DIR/functions # Import EC2 configuration source $TOP_DIR/eucarc -# Import neutron functions if needed -if is_service_enabled neutron; then - source $TOP_DIR/lib/neutron -fi - # Import exercise 
configuration source $TOP_DIR/exerciserc diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 4ca90a5c35..b981aa8294 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -27,14 +27,12 @@ TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions source $TOP_DIR/functions +# Import project functions +source $TOP_DIR/lib/neutron + # Import configuration source $TOP_DIR/openrc -# Import neutron functions if needed -if is_service_enabled neutron; then - source $TOP_DIR/lib/neutron -fi - # Import exercise configuration source $TOP_DIR/exerciserc diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 21b5d21c04..33e24589eb 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -27,14 +27,12 @@ TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions source $TOP_DIR/functions +# Import project functions +source $TOP_DIR/lib/cinder + # Import configuration source $TOP_DIR/openrc -# Import neutron functions if needed -if is_service_enabled neutron; then - source $TOP_DIR/lib/neutron -fi - # Import exercise configuration source $TOP_DIR/exerciserc diff --git a/functions b/functions index 281b6767c5..dc3278b56d 100644 --- a/functions +++ b/functions @@ -840,6 +840,16 @@ function is_service_enabled() { services=$@ for service in ${services}; do [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 + + # Look for top-level 'enabled' function for this service + if type is_${service}_enabled >/dev/null 2>&1; then + # A function exists for this service, use it + is_${service}_enabled + return $? 
+ fi + + # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled() + # are implemented [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0 [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0 [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0 diff --git a/lib/ceilometer b/lib/ceilometer index f9c76915d5..4ca77bb72b 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -59,7 +59,14 @@ TEMPEST_SERVICES+=,ceilometer # Functions # --------- -# + +# Test if any Ceilometer services are enabled +# is_ceilometer_enabled +function is_ceilometer_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"ceilometer-" ]] && return 0 + return 1 +} + # create_ceilometer_accounts() - Set up common required ceilometer accounts create_ceilometer_accounts() { diff --git a/lib/cinder b/lib/cinder index 9f70b2a0c9..3ec0fd4f09 100644 --- a/lib/cinder +++ b/lib/cinder @@ -85,6 +85,14 @@ TEMPEST_SERVICES+=,cinder # Functions # --------- + +# Test if any Cinder services are enabled +# is_cinder_enabled +function is_cinder_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"c-" ]] && return 0 + return 1 +} + # _clean_lvm_lv removes all cinder LVM volumes # # Usage: _clean_lvm_lv $VOLUME_GROUP $VOLUME_NAME_PREFIX diff --git a/lib/glance b/lib/glance index a5cb360743..1ebeeb3b2e 100644 --- a/lib/glance +++ b/lib/glance @@ -59,6 +59,13 @@ TEMPEST_SERVICES+=,glance # Functions # --------- +# Test if any Glance services are enabled +# is_glance_enabled +function is_glance_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"g-" ]] && return 0 + return 1 +} + # cleanup_glance() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_glance() { diff --git a/lib/neutron b/lib/neutron index 81db2a74d1..5bd38bcf73 100644 --- a/lib/neutron +++ b/lib/neutron @@ -244,6 +244,13 @@ TEMPEST_SERVICES+=,neutron # Functions # --------- +# Test if any Neutron services are enabled +# is_neutron_enabled 
+function is_neutron_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0 + return 1 +} + # configure_neutron() # Set common config for all neutron server and agents. function configure_neutron() { diff --git a/lib/nova b/lib/nova index dbc5c3db44..c6d99367c2 100644 --- a/lib/nova +++ b/lib/nova @@ -129,6 +129,20 @@ TEMPEST_SERVICES+=,nova # Functions # --------- +# Test if any Nova services are enabled +# is_nova_enabled +function is_nova_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"n-" ]] && return 0 + return 1 +} + +# Test if any Nova Cell services are enabled +# is_nova_enabled +function is_n-cell_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"n-cell-" ]] && return 0 + return 1 +} + # Helper to clean iptables rules function clean_iptables() { # Delete rules diff --git a/lib/swift b/lib/swift index 28ca8a80df..197c01b63c 100644 --- a/lib/swift +++ b/lib/swift @@ -118,6 +118,13 @@ TEMPEST_SERVICES+=,swift # Functions # --------- +# Test if any Swift services are enabled +# is_swift_enabled +function is_swift_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"s-" ]] && return 0 + return 1 +} + # cleanup_swift() - Remove residual data files function cleanup_swift() { rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} diff --git a/lib/template b/lib/template index 629e110271..b8e7c4d86f 100644 --- a/lib/template +++ b/lib/template @@ -10,6 +10,7 @@ # ``stack.sh`` calls the entry points in this order: # +# - is_XXXX_enabled # - install_XXXX # - configure_XXXX # - init_XXXX @@ -35,6 +36,13 @@ XXX_CONF_DIR=/etc/XXXX # Entry Points # ------------ +# Test if any XXXX services are enabled +# is_XXXX_enabled +function is_XXXX_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"XX-" ]] && return 0 + return 1 +} + # cleanup_XXXX() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_XXXX() { diff --git a/stack.sh b/stack.sh index 15e14303cf..d379d51c6e 100755 --- a/stack.sh +++ b/stack.sh @@ 
-1096,7 +1096,7 @@ if is_service_enabled s-proxy; then fi # Launch the Glance services -if is_service_enabled g-api g-reg; then +if is_service_enabled glance; then echo_summary "Starting Glance" start_glance fi diff --git a/stackrc b/stackrc index e89d25e4ab..2527b0ad84 100644 --- a/stackrc +++ b/stackrc @@ -35,7 +35,7 @@ fi # enable_service neutron # # Optional, to enable tempest configuration as part of devstack # enable_service tempest -ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,mysql +ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,mysql # Tell Tempest which services are available. The default is set here as # Tempest falls late in the configuration sequence. This differs from diff --git a/unstack.sh b/unstack.sh index 92d0642c38..c233f93e6b 100755 --- a/unstack.sh +++ b/unstack.sh @@ -104,7 +104,7 @@ if is_service_enabled nova; then stop_nova fi -if is_service_enabled g-api g-reg; then +if is_service_enabled glance; then stop_glance fi From dd710b4f12bb09abdc0dfa4a5f5c4aba81eba650 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Fri, 7 Feb 2014 16:46:17 +0000 Subject: [PATCH 0119/4119] Make neutron tempest run with tenant isolation by default This commit removes the workaround that switched tempest tenant isolation to false if Neutron was enabled. Recent changes to both neutron and tempest should make this safe finally. 
Change-Id: I929fcc73a7ef9a10f01af422ff62f9d451d52ae3 --- lib/tempest | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/tempest b/lib/tempest index 06183b107b..0fc0de26c8 100644 --- a/lib/tempest +++ b/lib/tempest @@ -266,11 +266,6 @@ function configure_tempest() { # Compute iniset $TEMPEST_CONFIG compute change_password_available False - # Note(nati) current tempest don't create network for each tenant - # so reuse same tenant for now - if is_service_enabled neutron; then - TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-False} - fi iniset $TEMPEST_CONFIG compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED iniset $TEMPEST_CONFIG compute network_for_ssh $PRIVATE_NETWORK_NAME From 67db4a9bd5c0d0a119c244e8dbb1a0a1990944b8 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 7 Feb 2014 16:02:37 -0500 Subject: [PATCH 0120/4119] remove database init from horizon horizon default config no longer uses a session database, it uses signed cookies instead, so we can stop doing db initialization and cleanup (which based on errexit we weren't doing correctly anyway). Change-Id: Icae4318e2784486db2888cbf353e95ac9a5d7cba --- lib/horizon | 9 --------- 1 file changed, 9 deletions(-) diff --git a/lib/horizon b/lib/horizon index c64d8502ba..2f5795d1ca 100644 --- a/lib/horizon +++ b/lib/horizon @@ -81,9 +81,6 @@ function configure_horizon() { # init_horizon() - Initialize databases, etc. function init_horizon() { - # Remove stale session database. - rm -f $HORIZON_DIR/openstack_dashboard/local/dashboard_openstack.sqlite3 - # ``local_settings.py`` is used to override horizon default settings. 
local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py cp $HORIZON_SETTINGS $local_settings @@ -106,12 +103,6 @@ function init_horizon() { _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_vpn True fi - # Initialize the horizon database (it stores sessions and notices shown to - # users). The user system is external (keystone). - cd $HORIZON_DIR - python manage.py syncdb --noinput - cd $TOP_DIR - # Create an empty directory that apache uses as docroot sudo mkdir -p $HORIZON_DIR/.blackhole From 5ed43bf82ac9eeb30ca543bcf695f9d45ddf77f5 Mon Sep 17 00:00:00 2001 From: Shane Wang Date: Fri, 7 Feb 2014 11:01:43 +0800 Subject: [PATCH 0121/4119] Fix misspellings in devstack Fix misspellings detected by: * pip install misspellings * git ls-files | grep -v locale | misspellings -f - Change-Id: I19726438d15cd27b813504aac530e7e53c4def12 Closes-Bug: #1257295 --- tools/xen/xenrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/xenrc b/tools/xen/xenrc index cd282341cb..b355a10d4f 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -35,7 +35,7 @@ XEN_INT_BRIDGE_OR_NET_NAME="OpenStack VM Integration Network" GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} # Extracted variables for OpenStack VM network device numbers. -# Make sure, they form a continous sequence starting from 0 +# Make sure they form a continuous sequence starting from 0 MGT_DEV_NR=0 VM_DEV_NR=1 PUB_DEV_NR=2 From d1cd0c66487cc00fa50c6638fb233e04b023d744 Mon Sep 17 00:00:00 2001 From: Daniel Kuffner Date: Sat, 8 Feb 2014 12:35:48 +0100 Subject: [PATCH 0122/4119] Docker install script fails to install docker registry The tools/docker/install_docker.sh script fails during the installation/setup of the docker registry. The problem is that the used environment variables are wrong. 
REGISTRY_IMAGE > DOCKER_REGISTRY_IMAGE REGISTRY_IMAGE_NAME > DOCKER_REGISTRY_IMAGE_NAME Change-Id: I16f051abe5c426f295c69d518b49c1b9a7b4cc94 --- tools/docker/install_docker.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh index 4fa23864fb..b9e1b242dd 100755 --- a/tools/docker/install_docker.sh +++ b/tools/docker/install_docker.sh @@ -60,5 +60,5 @@ docker pull $DOCKER_IMAGE docker tag $DOCKER_IMAGE $DOCKER_IMAGE_NAME # Get docker-registry image -docker pull $REGISTRY_IMAGE -docker tag $REGISTRY_IMAGE $REGISTRY_IMAGE_NAME +docker pull $DOCKER_REGISTRY_IMAGE +docker tag $DOCKER_REGISTRY_IMAGE $DOCKER_REGISTRY_IMAGE_NAME From d73af8787280002321ab52a3262a2d0b5a8e54cd Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 6 Feb 2014 15:33:52 -0800 Subject: [PATCH 0123/4119] If n-api-meta is being run, remove from NOVA_ENABLED_APIS If running n-api-meta as a separate service we shouldn't run it inside of n-api. This patch is in support of Iddd44f7ee43b9287a788dea49eaa484316f8da04 Change-Id: I8a54cf13dc6083b78e89c9ea5413d9e4d8d4b37a Related-Bug: #1270845 --- lib/nova | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/nova b/lib/nova index dbc5c3db44..3ee28faaec 100644 --- a/lib/nova +++ b/lib/nova @@ -389,6 +389,10 @@ function create_nova_conf() { fi if is_service_enabled n-api; then + if is_service_enabled n-api-meta; then + # If running n-api-meta as a separate service + NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//") + fi iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS" if is_service_enabled tls-proxy; then # Set the service port for a proxy to take the original From 9e032c2d374f80612c010775dd8d71389d5d09a3 Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Mon, 10 Feb 2014 11:36:25 +0100 Subject: [PATCH 0124/4119] read_password needs to store in .localrc.auto if local.conf is used. 
when running stack.sh with no passwords in local.conf read_password() creates localrc and local.conf is ignored Change-Id: I25ad07569d2b42b190449591d5a01ade8022392c --- stack.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 1a1460d2f3..303541d63e 100755 --- a/stack.sh +++ b/stack.sh @@ -362,7 +362,11 @@ function read_password { var=$1; msg=$2 pw=${!var} - localrc=$TOP_DIR/localrc + if [[ -f $RC_DIR/localrc ]]; then + localrc=$TOP_DIR/localrc + else + localrc=$TOP_DIR/.localrc.auto + fi # If the password is not defined yet, proceed to prompt user for a password. if [ ! $pw ]; then From 6b1cb10809ae4c2cc9a4b39e0298458f0ecd4853 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Mon, 10 Feb 2014 09:59:43 -0800 Subject: [PATCH 0125/4119] Add cliff, pycadf, stevedore, & taskflow from oslo Oslo has adopted 4 libraries that were previously on stackforge, so we can now install them from source. Change-Id: I6b6e20a7884b47ade466fc38641a5ac1a5f3e146 --- lib/oslo | 16 ++++++++++++++++ stackrc | 16 ++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/lib/oslo b/lib/oslo index f644ed76c3..b089842ae4 100644 --- a/lib/oslo +++ b/lib/oslo @@ -20,9 +20,13 @@ set +o xtrace # Defaults # -------- +CLIFF_DIR=$DEST/cliff OSLOCFG_DIR=$DEST/oslo.config OSLOMSG_DIR=$DEST/oslo.messaging OSLORWRAP_DIR=$DEST/oslo.rootwrap +PYCADF_DIR=$DEST/pycadf +STEVEDORE_DIR=$DEST/stevedore +TASKFLOW_DIR=$DEST/taskflow # Entry Points # ------------ @@ -33,6 +37,9 @@ function install_oslo() { # for a smoother transition of existing users. 
cleanup_oslo + git_clone $CLIFF_REPO $CLIFF_DIR $CLIFF_BRANCH + setup_develop $CLIFF_DIR + git_clone $OSLOCFG_REPO $OSLOCFG_DIR $OSLOCFG_BRANCH setup_develop $OSLOCFG_DIR @@ -41,6 +48,15 @@ function install_oslo() { git_clone $OSLORWRAP_REPO $OSLORWRAP_DIR $OSLORWRAP_BRANCH setup_develop $OSLORWRAP_DIR + + git_clone $PYCADF_REPO $PYCADF_DIR $PYCADF_BRANCH + setup_develop $PYCADF_DIR + + git_clone $STEVEDORE_REPO $STEVEDORE_DIR $STEVEDORE_BRANCH + setup_develop $STEVEDORE_DIR + + git_clone $TASKFLOW_REPO $TASKFLOW_DIR $TASKFLOW_BRANCH + setup_develop $TASKFLOW_DIR } # cleanup_oslo() - purge possibly old versions of oslo diff --git a/stackrc b/stackrc index 7eed60cb2c..729c2f5b40 100644 --- a/stackrc +++ b/stackrc @@ -140,6 +140,10 @@ NOVACLIENT_BRANCH=${NOVACLIENT_BRANCH:-master} OPENSTACKCLIENT_REPO=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git} OPENSTACKCLIENT_BRANCH=${OPENSTACKCLIENT_BRANCH:-master} +# cliff command line framework +CLIFF_REPO=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git} +CLIFF_BRANCH=${CLIFF_BRANCH:-master} + # oslo.config OSLOCFG_REPO=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git} OSLOCFG_BRANCH=${OSLOCFG_BRANCH:-master} @@ -152,6 +156,18 @@ OSLOMSG_BRANCH=${OSLOMSG_BRANCH:-master} OSLORWRAP_REPO=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git} OSLORWRAP_BRANCH=${OSLORWRAP_BRANCH:-master} +# pycadf auditing library +PYCADF_REPO=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git} +PYCADF_BRANCH=${PYCADF_BRANCH:-master} + +# stevedore plugin manager +STEVEDORE_REPO=${STEVEDORE_REPO:-${GIT_BASE}/openstack/stevedore.git} +STEVEDORE_BRANCH=${STEVEDORE_BRANCH:-master} + +# taskflow plugin manager +TASKFLOW_REPO=${TASKFLOW_REPO:-${GIT_BASE}/openstack/taskflow.git} +TASKFLOW_BRANCH=${TASKFLOW_BRANCH:-master} + # pbr drives the setuptools configs PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git} PBR_BRANCH=${PBR_BRANCH:-master} From 9972ec23c43cea1be6ee5174e72c06e32f295212 Mon Sep 17 00:00:00 2001 From: 
Malini Kamalambal Date: Mon, 10 Feb 2014 11:22:39 -0500 Subject: [PATCH 0126/4119] Add marconi to enabled services This patch adds marconi to enabled services. This is needed to run the tempest experimental job for marconi. Change-Id: I28794c3acacc6daa9f698f8031b58d1ee13c3bad Implements: blueprint add-basic-marconi-tests --- lib/marconi | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/marconi b/lib/marconi index 1eaebbdf16..d1ab5f3a5c 100644 --- a/lib/marconi +++ b/lib/marconi @@ -58,6 +58,13 @@ TEMPEST_SERVICES+=,marconi # Functions # --------- +# Test if any Marconi services are enabled +# is_marconi_enabled +function is_marconi_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"marconi-" ]] && return 0 + return 1 +} + # cleanup_marconi() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_marconi() { From ae90f67e05a93e7b69cd019f6c50fa20405edb68 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Mon, 10 Feb 2014 14:23:54 -0500 Subject: [PATCH 0127/4119] Stop catting tempest.config during tempest setup This commit removes the 'cat tempest.config' from lib/tempest. There is no reason to cat it as part of running devstack because the file is and can be interacted with after devstack finishes running. To prevent a loss of information in the gate this change should be coupled with devstack-gate change: Ifb36918cd0d686cb3865f5322cd62c209acaaf30 which copies the tempest.config file with the other test artifacts. 
Change-Id: Ia01cd53660b3490ea9faa9e9c746bafd9df12a9b --- lib/tempest | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index 76da17062c..c8eebfcf05 100644 --- a/lib/tempest +++ b/lib/tempest @@ -348,9 +348,6 @@ function configure_tempest() { fi done - echo "Created tempest configuration file:" - cat $TEMPEST_CONFIG - # Restore IFS IFS=$ifs #Restore errexit From bc76f748ebfc57f5af3e006f4092ae574b8febfe Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 10 Feb 2014 21:11:04 -0500 Subject: [PATCH 0128/4119] remove build_tempest we haven't actually used this script in about a year, in favor of the actual in tree lib/tempest. Change-Id: I9d78b395846ebe833a38ba50edae226040cd7f45 --- tools/build_tempest.sh | 53 ------------------------------------------ 1 file changed, 53 deletions(-) delete mode 100755 tools/build_tempest.sh diff --git a/tools/build_tempest.sh b/tools/build_tempest.sh deleted file mode 100755 index 6c527f5962..0000000000 --- a/tools/build_tempest.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash -# -# **build_tempest.sh** - -# Checkout and prepare a Tempest repo: git://git.openstack.org/openstack/tempest.git - -function usage { - echo "$0 - Check out and prepare a Tempest repo" - echo "" - echo "Usage: $0" - exit 1 -} - -if [ "$1" = "-h" ]; then - usage -fi - -# Clean up any resources that may be in use -cleanup() { - set +o errexit - - # Kill ourselves to signal any calling process - trap 2; kill -2 $$ -} - -trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $TOOLS_DIR/..; pwd) - -# Import common functions -. $TOP_DIR/functions - -# Abort if localrc is not set -if [ ! -e $TOP_DIR/localrc ]; then - echo "You must have a localrc with ALL necessary passwords and configuration defined before proceeding." - echo "See stack.sh for required passwords." 
- exit 1 -fi - -# Source params -source ./stackrc - -# Where Openstack code lives -DEST=${DEST:-/opt/stack} - -TEMPEST_DIR=$DEST/tempest - -# Install tests and prerequisites -git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH - -trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT From 0b9776d2f34197d1e920e1dc8506b8f8c31452ca Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Tue, 28 Jan 2014 11:20:53 -0500 Subject: [PATCH 0129/4119] Install glance images before starting Nova The docker driver for Nova needs a registry service to be running. It is being run inside a container using an image -- that image must be downloaded. The registry service must be started via nova_plugins/hypervisor-docker, but this is presently called before Glance's image download. The reordering is being done such that Glance may download the registry image, but prior to starting Nova such that "hypervisor-docker" may have an image downloaded and available to launch the registry. This change should cause no negative effects on other hypervisors. Change-Id: I7bccb42517e4c6187f2a90c64f39cda4577f89a3 blueprint: docker-glance-uploads --- stack.sh | 82 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/stack.sh b/stack.sh index 303541d63e..78cfbc58ce 100755 --- a/stack.sh +++ b/stack.sh @@ -1090,6 +1090,47 @@ if is_service_enabled g-api g-reg; then start_glance fi +# Install Images +# ============== + +# Upload an image to glance. +# +# The default image is cirros, a small testing image which lets you login as **root** +# cirros has a ``cloud-init`` analog supporting login via keypair and sending +# scripts as userdata. +# See https://help.ubuntu.com/community/CloudInit for more on cloud-init +# +# Override ``IMAGE_URLS`` with a comma-separated list of UEC images. 
+# * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz + +if is_service_enabled g-reg; then + TOKEN=$(keystone token-get | grep ' id ' | get_field 2) + die_if_not_set $LINENO TOKEN "Keystone fail to get token" + + if is_baremetal; then + echo_summary "Creating and uploading baremetal images" + + # build and upload separate deploy kernel & ramdisk + upload_baremetal_deploy $TOKEN + + # upload images, separating out the kernel & ramdisk for PXE boot + for image_url in ${IMAGE_URLS//,/ }; do + upload_baremetal_image $image_url $TOKEN + done + else + echo_summary "Uploading images" + + # Option to upload legacy ami-tty, which works with xenserver + if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then + IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz" + fi + + for image_url in ${IMAGE_URLS//,/ }; do + upload_image $image_url $TOKEN + done + fi +fi + # Create an access key and secret key for nova ec2 register image if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1) @@ -1195,47 +1236,6 @@ if is_service_enabled nova && is_service_enabled key; then fi -# Install Images -# ============== - -# Upload an image to glance. -# -# The default image is cirros, a small testing image which lets you login as **root** -# cirros has a ``cloud-init`` analog supporting login via keypair and sending -# scripts as userdata. -# See https://help.ubuntu.com/community/CloudInit for more on cloud-init -# -# Override ``IMAGE_URLS`` with a comma-separated list of UEC images. 
-# * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz - -if is_service_enabled g-reg; then - TOKEN=$(keystone token-get | grep ' id ' | get_field 2) - die_if_not_set $LINENO TOKEN "Keystone fail to get token" - - if is_baremetal; then - echo_summary "Creating and uploading baremetal images" - - # build and upload separate deploy kernel & ramdisk - upload_baremetal_deploy $TOKEN - - # upload images, separating out the kernel & ramdisk for PXE boot - for image_url in ${IMAGE_URLS//,/ }; do - upload_baremetal_image $image_url $TOKEN - done - else - echo_summary "Uploading images" - - # Option to upload legacy ami-tty, which works with xenserver - if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then - IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz" - fi - - for image_url in ${IMAGE_URLS//,/ }; do - upload_image $image_url $TOKEN - done - fi -fi - # If we are running nova with baremetal driver, there are a few # last-mile configuration bits to attend to, which must happen # after n-api and n-sch have started. From 97ce935a9244956fd977cd1eb62e7b429e5cb141 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Fri, 31 Jan 2014 01:40:50 -0500 Subject: [PATCH 0130/4119] Update docker driver to use a CirrOS image For purposes of matching the VM image used in devstack across hypervisors, set the default container image for Docker to cirros. This uses the CirrOS image from stackbrew, the "standard library" for Docker. 
Change-Id: I9d767a4e06c5caa7b92ffea25e6a9aeda9bf282a --- lib/nova_plugins/hypervisor-docker | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index bb934b87d6..cdd9317761 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -31,8 +31,8 @@ DOCKER_UNIX_SOCKET=/var/run/docker.sock DOCKER_PID_FILE=/var/run/docker.pid DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042} -DOCKER_IMAGE=${DOCKER_IMAGE:-busybox:latest} -DOCKER_IMAGE_NAME=busybox +DOCKER_IMAGE=${DOCKER_IMAGE:-cirros:latest} +DOCKER_IMAGE_NAME=cirros DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-registry:latest} DOCKER_REGISTRY_IMAGE_NAME=registry DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME} From d0860cc26d78c3f1c70b332ecc793442a1c8048d Mon Sep 17 00:00:00 2001 From: john-griffith Date: Thu, 23 Jan 2014 11:31:10 -0700 Subject: [PATCH 0131/4119] Replace custom cinder driver configs The devstack/lib/cinder file has a number of third party driver config options hard-coded in it. Rather than add yet another if driver== statement here let's use plugin files and do something similar to what's already in place for nova_hypervisors and neutron plugins. This works the same way folks were implementing their drivers already, the key is to use a CINDER_DRIVER variable in your localrc file that matches the name of the lib/cinder_plugin file to use. The existing third party driver entries that were in lib/cinder have been migrated to cooresponding plugin files. 
Change-Id: I4ee51ea542d5aa63879afd5297311a9df727c57f --- lib/cinder | 44 ++++++------------------------- lib/cinder_plugins/XenAPINFS | 44 +++++++++++++++++++++++++++++++ lib/cinder_plugins/glusterfs | 50 ++++++++++++++++++++++++++++++++++++ lib/cinder_plugins/nfs | 42 ++++++++++++++++++++++++++++++ lib/cinder_plugins/sheepdog | 39 ++++++++++++++++++++++++++++ lib/cinder_plugins/solidfire | 48 ++++++++++++++++++++++++++++++++++ lib/cinder_plugins/vsphere | 42 ++++++++++++++++++++++++++++++ 7 files changed, 273 insertions(+), 36 deletions(-) create mode 100644 lib/cinder_plugins/XenAPINFS create mode 100644 lib/cinder_plugins/glusterfs create mode 100644 lib/cinder_plugins/nfs create mode 100644 lib/cinder_plugins/sheepdog create mode 100644 lib/cinder_plugins/solidfire create mode 100644 lib/cinder_plugins/vsphere diff --git a/lib/cinder b/lib/cinder index 9f70b2a0c9..51eb3c1262 100644 --- a/lib/cinder +++ b/lib/cinder @@ -27,6 +27,12 @@ set +o xtrace # set up default driver CINDER_DRIVER=${CINDER_DRIVER:-default} +CINDER_PLUGINS=$TOP_DIR/lib/cinder_plugins + +# grab plugin config if specified via cinder_driver +if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then + source $CINDER_PLUGINS/$CINDER_DRIVER +fi # set up default directories CINDER_DIR=$DEST/cinder @@ -300,42 +306,8 @@ function configure_cinder() { setup_colorized_logging $CINDER_CONF DEFAULT "project_id" "user_id" fi - if [ "$CINDER_DRIVER" == "XenAPINFS" ]; then - ( - set -u - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver" - iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" - iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" - iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD" - iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" - iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" - ) - elif [ 
"$CINDER_DRIVER" == "nfs" ]; then - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.nfs.NfsDriver" - iniset $CINDER_CONF DEFAULT nfs_shares_config "$CINDER_CONF_DIR/nfs_shares.conf" - echo "$CINDER_NFS_SERVERPATH" | sudo tee "$CINDER_CONF_DIR/nfs_shares.conf" - sudo chmod 666 $CINDER_CONF_DIR/nfs_shares.conf - elif [ "$CINDER_DRIVER" == "sheepdog" ]; then - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" - elif [ "$CINDER_DRIVER" == "glusterfs" ]; then - # To use glusterfs, set the following in localrc: - # CINDER_DRIVER=glusterfs - # CINDER_GLUSTERFS_SHARES="127.0.0.1:/vol1;127.0.0.1:/vol2" - # Shares are : and separated by semicolons. - - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.glusterfs.GlusterfsDriver" - iniset $CINDER_CONF DEFAULT glusterfs_shares_config "$CINDER_CONF_DIR/glusterfs_shares" - touch $CINDER_CONF_DIR/glusterfs_shares - if [ ! -z "$CINDER_GLUSTERFS_SHARES" ]; then - CINDER_GLUSTERFS_SHARES=$(echo $CINDER_GLUSTERFS_SHARES | tr ";" "\n") - echo "$CINDER_GLUSTERFS_SHARES" > $CINDER_CONF_DIR/glusterfs_shares - fi - elif [ "$CINDER_DRIVER" == "vsphere" ]; then - echo_summary "Using VMware vCenter driver" - iniset $CINDER_CONF DEFAULT vmware_host_ip "$VMWAREAPI_IP" - iniset $CINDER_CONF DEFAULT vmware_host_username "$VMWAREAPI_USER" - iniset $CINDER_CONF DEFAULT vmware_host_password "$VMWAREAPI_PASSWORD" - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver" + if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then + configure_cinder_driver fi if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then diff --git a/lib/cinder_plugins/XenAPINFS b/lib/cinder_plugins/XenAPINFS new file mode 100644 index 0000000000..72e1c1398c --- /dev/null +++ b/lib/cinder_plugins/XenAPINFS @@ -0,0 +1,44 @@ +# lib/cinder_plugins/XenAPINFS +# Configure the XenAPINFS driver + +# Enable with: +# +# CINDER_DRIVER=XenAPINFS + +# Dependencies: +# +# - ``functions`` file +# 
- ``cinder`` configurations + +# configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver() { + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver" + iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" + iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" + iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD" + iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" + iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" +} + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/glusterfs b/lib/cinder_plugins/glusterfs new file mode 100644 index 0000000000..a0c5ae8d5e --- /dev/null +++ b/lib/cinder_plugins/glusterfs @@ -0,0 +1,50 @@ +# lib/cinder_plugins/glusterfs +# Configure the glusterfs driver + +# Enable with: +# +# CINDER_DRIVER=glusterfs + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver() { + # To use glusterfs, set the following in localrc: + # CINDER_DRIVER=glusterfs + # CINDER_GLUSTERFS_SHARES="127.0.0.1:/vol1;127.0.0.1:/vol2" + # Shares are : and separated by semicolons. 
+ + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.glusterfs.GlusterfsDriver" + iniset $CINDER_CONF DEFAULT glusterfs_shares_config "$CINDER_CONF_DIR/glusterfs_shares" + touch $CINDER_CONF_DIR/glusterfs_shares + if [ ! -z "$CINDER_GLUSTERFS_SHARES" ]; then + CINDER_GLUSTERFS_SHARES=$(echo $CINDER_GLUSTERFS_SHARES | tr ";" "\n") + echo "$CINDER_GLUSTERFS_SHARES" > $CINDER_CONF_DIR/glusterfs_shares + fi +} + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/nfs b/lib/cinder_plugins/nfs new file mode 100644 index 0000000000..ea2c9ce665 --- /dev/null +++ b/lib/cinder_plugins/nfs @@ -0,0 +1,42 @@ +# lib/cinder_plugins/nfs +# Configure the nfs driver + +# Enable with: +# +# CINDER_DRIVER=nfs + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver() { + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.nfs.NfsDriver" + iniset $CINDER_CONF DEFAULT nfs_shares_config "$CINDER_CONF_DIR/nfs_shares.conf" + echo "$CINDER_NFS_SERVERPATH" | sudo tee "$CINDER_CONF_DIR/nfs_shares.conf" + sudo chmod 660 $CINDER_CONF_DIR/nfs_shares.conf +} + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/sheepdog b/lib/cinder_plugins/sheepdog new file mode 100644 index 0000000000..4435932371 --- /dev/null +++ b/lib/cinder_plugins/sheepdog @@ -0,0 +1,39 @@ +# lib/cinder_plugins/sheepdog +# Configure the sheepdog driver + +# Enable with: +# +# CINDER_DRIVER=sheepdog + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# 
configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver() { + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" +} + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/solidfire b/lib/cinder_plugins/solidfire new file mode 100644 index 0000000000..47c113e1a2 --- /dev/null +++ b/lib/cinder_plugins/solidfire @@ -0,0 +1,48 @@ +# lib/cinder_plugins/solidfire +# Configure the solidfire driver + +# Enable with: +# +# CINDER_DRIVER=solidfire + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver() { + # To use solidfire, set the following in localrc: + # CINDER_DRIVER=solidfire + # SAN_IP= + # SAN_LOGIN= + # SAN_PASSWORD= + + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.solidfire.SolidFireDriver" + iniset $CINDER_CONF DEFAULT san_ip $SAN_IP + iniset $CINDER_CONF DEFAULT san_login $SAN_LOGIN + iniset $CINDER_CONF DEFAULT san_password $SAN_PASSWORD +} + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/vsphere b/lib/cinder_plugins/vsphere new file mode 100644 index 0000000000..c8cab6a8c1 --- /dev/null +++ b/lib/cinder_plugins/vsphere @@ -0,0 +1,42 @@ +# lib/cinder_plugins/vsphere +# Configure the vsphere 
driver + +# Enable with: +# +# CINDER_DRIVER=vsphere + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver() { + iniset $CINDER_CONF DEFAULT vmware_host_ip "$VMWAREAPI_IP" + iniset $CINDER_CONF DEFAULT vmware_host_username "$VMWAREAPI_USER" + iniset $CINDER_CONF DEFAULT vmware_host_password "$VMWAREAPI_PASSWORD" + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver" +} + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: From 062cdaf84c11fbbef71cab1db833c4aac3baadbf Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 10 Feb 2014 22:24:49 -0500 Subject: [PATCH 0132/4119] add dstat to see top process info pidstat data isn't exceptionally useful, it's lots of lines, and seems to be missing some of the most critical one. dstat is kind of like sysstat, except the formatting is much better. It also supports advanced features like the top CPU using process at every interval. 
put this behind the sysstat variable, as we'll probably want to replace sysstat & pidstat with this if it works Change-Id: I48dc22a0a7e63fe3abb527646cc70525998a7d85 --- files/apts/dstat | 1 + files/rpms-suse/dstat | 1 + files/rpms/dstat | 1 + stack.sh | 12 ++++++++++++ 4 files changed, 15 insertions(+) create mode 100644 files/apts/dstat create mode 100644 files/rpms-suse/dstat create mode 100644 files/rpms/dstat diff --git a/files/apts/dstat b/files/apts/dstat new file mode 100644 index 0000000000..2b643b8b1b --- /dev/null +++ b/files/apts/dstat @@ -0,0 +1 @@ +dstat diff --git a/files/rpms-suse/dstat b/files/rpms-suse/dstat new file mode 100644 index 0000000000..2b643b8b1b --- /dev/null +++ b/files/rpms-suse/dstat @@ -0,0 +1 @@ +dstat diff --git a/files/rpms/dstat b/files/rpms/dstat new file mode 100644 index 0000000000..8a8f8fe737 --- /dev/null +++ b/files/rpms/dstat @@ -0,0 +1 @@ +dstat \ No newline at end of file diff --git a/stack.sh b/stack.sh index 303541d63e..1dfd4ddbc6 100755 --- a/stack.sh +++ b/stack.sh @@ -298,6 +298,8 @@ SYSLOG_PORT=${SYSLOG_PORT:-516} SYSSTAT_FILE=${SYSSTAT_FILE:-"sysstat.dat"} SYSSTAT_INTERVAL=${SYSSTAT_INTERVAL:-"1"} +DSTAT_FILE=${DSTAT_FILE:-"dstat.txt"} + PIDSTAT_FILE=${PIDSTAT_FILE:-"pidstat.txt"} PIDSTAT_INTERVAL=${PIDSTAT_INTERVAL:-"5"} @@ -879,6 +881,16 @@ if is_service_enabled sysstat; then fi fi +if is_service_enabled dstat; then + # Per-process stats + DSTAT_OPTS="-tcndylp --top-cpu-adv" + if [[ -n ${SCREEN_LOGDIR} ]]; then + screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE" + else + screen_it dstat "dstat $DSTAT_OPTS" + fi +fi + if is_service_enabled pidstat; then # Per-process stats PIDSTAT_OPTS="-l -p ALL -T ALL" From c86ec3568c7ed11ce38584b654b91594eb0d235a Mon Sep 17 00:00:00 2001 From: Yuriy Zveryanskyy Date: Wed, 12 Feb 2014 11:03:18 +0200 Subject: [PATCH 0133/4119] Fix hook script for Ironic cleanup_ironic should not be started on "unstack" phase, otherwise API service not restarted 
because auth cache directory removed. Change-Id: I3da86b9fb8c3ce1185aff05df0fa83cf259708f4 --- extras.d/50-ironic.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/extras.d/50-ironic.sh b/extras.d/50-ironic.sh index f68a14680f..9e61dc5d78 100644 --- a/extras.d/50-ironic.sh +++ b/extras.d/50-ironic.sh @@ -28,6 +28,9 @@ if is_service_enabled ir-api ir-cond; then if [[ "$1" == "unstack" ]]; then stop_ironic + fi + + if [[ "$1" == "clean" ]]; then cleanup_ironic fi fi From 2b69f23625f988d17574d746773e2932ca109427 Mon Sep 17 00:00:00 2001 From: tanlin Date: Wed, 12 Feb 2014 16:11:32 +0800 Subject: [PATCH 0134/4119] Rename Openstack to OpenStack Change-Id: I78ac040e926ef8040ee674b6fea3223a8ab4ae99 --- openrc | 2 +- tools/create_userrc.sh | 2 +- tools/jenkins/README.md | 2 +- tools/xen/README.md | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/openrc b/openrc index 784b00e51b..fc066ad82c 100644 --- a/openrc +++ b/openrc @@ -67,7 +67,7 @@ GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} # Identity API version export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} -# Authenticating against an Openstack cloud using Keystone returns a **Token** +# Authenticating against an OpenStack cloud using Keystone returns a **Token** # and **Service Catalog**. The catalog contains the endpoints for all services # the user/tenant has access to - including nova, glance, keystone, swift, ... # We currently recommend using the 2.0 *identity api*. 
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index e2d855c4df..1c9565145b 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -199,7 +199,7 @@ export EC2_URL="$EC2_URL" export S3_URL="$S3_URL" # OpenStack USER ID = $user_id export OS_USERNAME="$user_name" -# Openstack Tenant ID = $tenant_id +# OpenStack Tenant ID = $tenant_id export OS_TENANT_NAME="$tenant_name" export OS_AUTH_URL="$OS_AUTH_URL" export OS_CACERT="$OS_CACERT" diff --git a/tools/jenkins/README.md b/tools/jenkins/README.md index 371017db1a..3586da9c66 100644 --- a/tools/jenkins/README.md +++ b/tools/jenkins/README.md @@ -1,6 +1,6 @@ Getting Started With Jenkins and Devstack ========================================= -This little corner of devstack is to show how to get an Openstack jenkins +This little corner of devstack is to show how to get an OpenStack jenkins environment up and running quickly, using the rcb configuration methodology. diff --git a/tools/xen/README.md b/tools/xen/README.md index ee1abcc091..712782bc5f 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -1,11 +1,11 @@ # Getting Started With XenServer and Devstack The purpose of the code in this directory it to help developers bootstrap a -XenServer 6.2 (older versions may also work) + Openstack development +XenServer 6.2 (older versions may also work) + OpenStack development environment. This file gives some pointers on how to get started. Xenserver is a Type 1 hypervisor, so it is best installed on bare metal. The -Openstack services are configured to run within a virtual machine (called OS +OpenStack services are configured to run within a virtual machine (called OS domU) on the XenServer host. The VM uses the XAPI toolstack to communicate with the host over a network connection (see `MGT_BRIDGE_OR_NET_NAME`). 
From 1814e671d3af0231aa18a08d3406d54332f9b4ef Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Tue, 11 Feb 2014 17:56:07 +0100 Subject: [PATCH 0135/4119] Fix bug "Invalid OpenStack Nova credentials." on the gate During the process, when create_userrc.sh tries to creates certificates and sourcable rc, it assumes that all users have the same password. Change-Id: Ifb57a43aad439ffe041e98465719a8a8eceae544 Closes-Bug: #1260723 --- lib/swift | 11 ++++++++--- tools/create_userrc.sh | 8 ++++++++ 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/lib/swift b/lib/swift index 0febb00f60..be25c81468 100644 --- a/lib/swift +++ b/lib/swift @@ -520,6 +520,11 @@ function create_swift_disk() { # swifttenanttest2 swiftusertest2 admin function create_swift_accounts() { + # Defines specific passwords used by tools/create_userrc.sh + SWIFTUSERTEST1_PASSWORD=testing + SWIFTUSERTEST2_PASSWORD=testing2 + SWIFTUSERTEST3_PASSWORD=testing3 + KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql} SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") @@ -542,17 +547,17 @@ function create_swift_accounts() { SWIFT_TENANT_TEST1=$(keystone tenant-create --name=swifttenanttest1 | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_TENANT_TEST1 "Failure creating SWIFT_TENANT_TEST1" - SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=testing --email=test@example.com | grep " id " | get_field 2) + SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=$SWIFTUSERTEST1_PASSWORD --email=test@example.com | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1" keystone user-role-add --user-id $SWIFT_USER_TEST1 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST1 - SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 --pass=testing3 --email=test3@example.com | grep " id " | get_field 2) + SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 
--pass=$SWIFTUSERTEST3_PASSWORD --email=test3@example.com | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_USER_TEST3 "Failure creating SWIFT_USER_TEST3" keystone user-role-add --user-id $SWIFT_USER_TEST3 --role-id $ANOTHER_ROLE --tenant-id $SWIFT_TENANT_TEST1 SWIFT_TENANT_TEST2=$(keystone tenant-create --name=swifttenanttest2 | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_TENANT_TEST2 "Failure creating SWIFT_TENANT_TEST2" - SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=testing2 --email=test2@example.com | grep " id " | get_field 2) + SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=$SWIFTUSERTEST2_PASSWORD --email=test2@example.com | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_USER_TEST2 "Failure creating SWIFT_USER_TEST2" keystone user-role-add --user-id $SWIFT_USER_TEST2 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST2 } diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index e2d855c4df..d9c93cc476 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -253,6 +253,14 @@ if [ $MODE != "create" ]; then if [ $MODE = one -a "$user_name" != "$USER_NAME" ]; then continue; fi + + # Checks for a specific password defined for an user. + # Example for an username johndoe: + # JOHNDOE_PASSWORD=1234 + eval SPECIFIC_UPASSWORD="\$${USER_NAME^^}_PASSWORD" + if [ -n "$SPECIFIC_UPASSWORD" ]; then + USER_PASS=$SPECIFIC_UPASSWORD + fi add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS" done done From dff49a242eef817efa23d4e808aaa6a74ac82ed0 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 30 Jan 2014 15:37:40 -0600 Subject: [PATCH 0136/4119] Split functions Move shared and non-DevStack-specific functions to `functions-common`. This is a code move only with some updated comments. The functions are now sorted alphabetically within function groups, eg. all git-related functions are grouped together. The groups are listed at the top of the file. 
'functions' sources 'functions-common' so no additional changes are required for backward-compatability. All functions shared with Grenade have also been moved. functions-common was created from commit e0ed8ea038299952826b27a16753775472f108d8 Change-Id: I73bf7134fd6a60ec1ea44a5bfab08b0569b60ded --- functions | 1422 +-------------------------------------------- functions-common | 1433 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 1439 insertions(+), 1416 deletions(-) create mode 100644 functions-common diff --git a/functions b/functions index dc3278b56d..5eae7fe510 100644 --- a/functions +++ b/functions @@ -1,563 +1,21 @@ -# functions - Common functions used by DevStack components +# functions - DevStack-specific functions # # The following variables are assumed to be defined by certain functions: # # - ``ENABLED_SERVICES`` -# - ``ERROR_ON_CLONE`` # - ``FILES`` # - ``GLANCE_HOSTPORT`` -# - ``OFFLINE`` -# - ``PIP_DOWNLOAD_CACHE`` -# - ``PIP_USE_MIRRORS`` -# - ``RECLONE`` # - ``TRACK_DEPENDS`` -# - ``http_proxy``, ``https_proxy``, ``no_proxy`` +# Include the common functions +FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) +source ${FUNC_DIR}/functions-common # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace -# Convert CIDR notation to a IPv4 netmask -# cidr2netmask cidr-bits -function cidr2netmask() { - local maskpat="255 255 255 255" - local maskdgt="254 252 248 240 224 192 128" - set -- ${maskpat:0:$(( ($1 / 8) * 4 ))}${maskdgt:$(( (7 - ($1 % 8)) * 4 )):3} - echo ${1-0}.${2-0}.${3-0}.${4-0} -} - - -# Return the network portion of the given IP address using netmask -# netmask is in the traditional dotted-quad format -# maskip ip-address netmask -function maskip() { - local ip=$1 - local mask=$2 - local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}" - local subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.})) - echo $subnet -} - - -# Exit 0 if 
address is in network or 1 if address is not in network -# ip-range is in CIDR notation: 1.2.3.4/20 -# address_in_net ip-address ip-range -function address_in_net() { - local ip=$1 - local range=$2 - local masklen=${range#*/} - local network=$(maskip ${range%/*} $(cidr2netmask $masklen)) - local subnet=$(maskip $ip $(cidr2netmask $masklen)) - [[ $network == $subnet ]] -} - - -# Wrapper for ``apt-get`` to set cache and proxy environment variables -# Uses globals ``OFFLINE``, ``*_proxy`` -# apt_get operation package [package ...] -function apt_get() { - [[ "$OFFLINE" = "True" || -z "$@" ]] && return - local sudo="sudo" - [[ "$(id -u)" = "0" ]] && sudo="env" - $sudo DEBIAN_FRONTEND=noninteractive \ - http_proxy=$http_proxy https_proxy=$https_proxy \ - no_proxy=$no_proxy \ - apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" -} - - -# Gracefully cp only if source file/dir exists -# cp_it source destination -function cp_it { - if [ -e $1 ] || [ -d $1 ]; then - cp -pRL $1 $2 - fi -} - - -# Prints backtrace info -# filename:lineno:function -function backtrace { - local level=$1 - local deep=$((${#BASH_SOURCE[@]} - 1)) - echo "[Call Trace]" - while [ $level -le $deep ]; do - echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}" - deep=$((deep - 1)) - done -} - - -# Prints line number and "message" then exits -# die $LINENO "message" -function die() { - local exitcode=$? - set +o xtrace - local line=$1; shift - if [ $exitcode == 0 ]; then - exitcode=1 - fi - backtrace 2 - err $line "$*" - exit $exitcode -} - - -# Checks an environment variable is not set or has length 0 OR if the -# exit code is non-zero and prints "message" and exits -# NOTE: env-var is the variable name without a '$' -# die_if_not_set $LINENO env-var "message" -function die_if_not_set() { - local exitcode=$? - FXTRACE=$(set +o | grep xtrace) - set +o xtrace - local line=$1; shift - local evar=$1; shift - if ! 
is_set $evar || [ $exitcode != 0 ]; then - die $line "$*" - fi - $FXTRACE -} - - -# Prints line number and "message" in error format -# err $LINENO "message" -function err() { - local exitcode=$? - errXTRACE=$(set +o | grep xtrace) - set +o xtrace - local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2" - echo $msg 1>&2; - if [[ -n ${SCREEN_LOGDIR} ]]; then - echo $msg >> "${SCREEN_LOGDIR}/error.log" - fi - $errXTRACE - return $exitcode -} - - -# Checks an environment variable is not set or has length 0 OR if the -# exit code is non-zero and prints "message" -# NOTE: env-var is the variable name without a '$' -# err_if_not_set $LINENO env-var "message" -function err_if_not_set() { - local exitcode=$? - errinsXTRACE=$(set +o | grep xtrace) - set +o xtrace - local line=$1; shift - local evar=$1; shift - if ! is_set $evar || [ $exitcode != 0 ]; then - err $line "$*" - fi - $errinsXTRACE - return $exitcode -} - - -# Prints line number and "message" in warning format -# warn $LINENO "message" -function warn() { - local exitcode=$? - errXTRACE=$(set +o | grep xtrace) - set +o xtrace - local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2" - echo $msg 1>&2; - if [[ -n ${SCREEN_LOGDIR} ]]; then - echo $msg >> "${SCREEN_LOGDIR}/error.log" - fi - $errXTRACE - return $exitcode -} - - -# HTTP and HTTPS proxy servers are supported via the usual environment variables [1] -# ``http_proxy``, ``https_proxy`` and ``no_proxy``. 
They can be set in -# ``localrc`` or on the command line if necessary:: -# -# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html -# -# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh - -function export_proxy_variables() { - if [[ -n "$http_proxy" ]]; then - export http_proxy=$http_proxy - fi - if [[ -n "$https_proxy" ]]; then - export https_proxy=$https_proxy - fi - if [[ -n "$no_proxy" ]]; then - export no_proxy=$no_proxy - fi -} - - -# Grab a numbered field from python prettytable output -# Fields are numbered starting with 1 -# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc. -# get_field field-number -function get_field() { - while read data; do - if [ "$1" -lt 0 ]; then - field="(\$(NF$1))" - else - field="\$$(($1 + 1))" - fi - echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}" - done -} - - -# Get the default value for HOST_IP -# get_default_host_ip fixed_range floating_range host_ip_iface host_ip -function get_default_host_ip() { - local fixed_range=$1 - local floating_range=$2 - local host_ip_iface=$3 - local host_ip=$4 - - # Find the interface used for the default route - host_ip_iface=${host_ip_iface:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)} - # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable - if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then - host_ip="" - host_ips=`LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/"); print parts[1]}'` - for IP in $host_ips; do - # Attempt to filter out IP addresses that are part of the fixed and - # floating range. Note that this method only works if the ``netaddr`` - # python library is installed. If it is not installed, an error - # will be printed and the first IP from the interface will be used. - # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct - # address. - if ! 
(address_in_net $IP $fixed_range || address_in_net $IP $floating_range); then - host_ip=$IP - break; - fi - done - fi - echo $host_ip -} - - -function _get_package_dir() { - local pkg_dir - if is_ubuntu; then - pkg_dir=$FILES/apts - elif is_fedora; then - pkg_dir=$FILES/rpms - elif is_suse; then - pkg_dir=$FILES/rpms-suse - else - exit_distro_not_supported "list of packages" - fi - echo "$pkg_dir" -} - - -# get_packages() collects a list of package names of any type from the -# prerequisite files in ``files/{apts|rpms}``. The list is intended -# to be passed to a package installer such as apt or yum. -# -# Only packages required for the services in 1st argument will be -# included. Two bits of metadata are recognized in the prerequisite files: -# -# - ``# NOPRIME`` defers installation to be performed later in `stack.sh` -# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection -# of the package to the distros listed. The distro names are case insensitive. -function get_packages() { - local services=$@ - local package_dir=$(_get_package_dir) - local file_to_parse - local service - - if [[ -z "$package_dir" ]]; then - echo "No package directory supplied" - return 1 - fi - if [[ -z "$DISTRO" ]]; then - GetDistro - fi - for service in ${services//,/ }; do - # Allow individual services to specify dependencies - if [[ -e ${package_dir}/${service} ]]; then - file_to_parse="${file_to_parse} $service" - fi - # NOTE(sdague) n-api needs glance for now because that's where - # glance client is - if [[ $service == n-api ]]; then - if [[ ! $file_to_parse =~ nova ]]; then - file_to_parse="${file_to_parse} nova" - fi - if [[ ! $file_to_parse =~ glance ]]; then - file_to_parse="${file_to_parse} glance" - fi - elif [[ $service == c-* ]]; then - if [[ ! $file_to_parse =~ cinder ]]; then - file_to_parse="${file_to_parse} cinder" - fi - elif [[ $service == ceilometer-* ]]; then - if [[ ! 
$file_to_parse =~ ceilometer ]]; then - file_to_parse="${file_to_parse} ceilometer" - fi - elif [[ $service == s-* ]]; then - if [[ ! $file_to_parse =~ swift ]]; then - file_to_parse="${file_to_parse} swift" - fi - elif [[ $service == n-* ]]; then - if [[ ! $file_to_parse =~ nova ]]; then - file_to_parse="${file_to_parse} nova" - fi - elif [[ $service == g-* ]]; then - if [[ ! $file_to_parse =~ glance ]]; then - file_to_parse="${file_to_parse} glance" - fi - elif [[ $service == key* ]]; then - if [[ ! $file_to_parse =~ keystone ]]; then - file_to_parse="${file_to_parse} keystone" - fi - elif [[ $service == q-* ]]; then - if [[ ! $file_to_parse =~ neutron ]]; then - file_to_parse="${file_to_parse} neutron" - fi - fi - done - - for file in ${file_to_parse}; do - local fname=${package_dir}/${file} - local OIFS line package distros distro - [[ -e $fname ]] || continue - - OIFS=$IFS - IFS=$'\n' - for line in $(<${fname}); do - if [[ $line =~ "NOPRIME" ]]; then - continue - fi - - # Assume we want this package - package=${line%#*} - inst_pkg=1 - - # Look for # dist:xxx in comment - if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then - # We are using BASH regexp matching feature. - package=${BASH_REMATCH[1]} - distros=${BASH_REMATCH[2]} - # In bash ${VAR,,} will lowecase VAR - # Look for a match in the distro list - if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then - # If no match then skip this package - inst_pkg=0 - fi - fi - - # Look for # testonly in comment - if [[ $line =~ (.*)#.*testonly.* ]]; then - package=${BASH_REMATCH[1]} - # Are we installing test packages? 
(test for the default value) - if [[ $INSTALL_TESTONLY_PACKAGES = "False" ]]; then - # If not installing test packages the skip this package - inst_pkg=0 - fi - fi - - if [[ $inst_pkg = 1 ]]; then - echo $package - fi - done - IFS=$OIFS - done -} - - -# Determine OS Vendor, Release and Update -# Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora -# Returns results in global variables: -# os_VENDOR - vendor name -# os_RELEASE - release -# os_UPDATE - update -# os_PACKAGE - package type -# os_CODENAME - vendor's codename for release -# GetOSVersion -GetOSVersion() { - # Figure out which vendor we are - if [[ -x "`which sw_vers 2>/dev/null`" ]]; then - # OS/X - os_VENDOR=`sw_vers -productName` - os_RELEASE=`sw_vers -productVersion` - os_UPDATE=${os_RELEASE##*.} - os_RELEASE=${os_RELEASE%.*} - os_PACKAGE="" - if [[ "$os_RELEASE" =~ "10.7" ]]; then - os_CODENAME="lion" - elif [[ "$os_RELEASE" =~ "10.6" ]]; then - os_CODENAME="snow leopard" - elif [[ "$os_RELEASE" =~ "10.5" ]]; then - os_CODENAME="leopard" - elif [[ "$os_RELEASE" =~ "10.4" ]]; then - os_CODENAME="tiger" - elif [[ "$os_RELEASE" =~ "10.3" ]]; then - os_CODENAME="panther" - else - os_CODENAME="" - fi - elif [[ -x $(which lsb_release 2>/dev/null) ]]; then - os_VENDOR=$(lsb_release -i -s) - os_RELEASE=$(lsb_release -r -s) - os_UPDATE="" - os_PACKAGE="rpm" - if [[ "Debian,Ubuntu,LinuxMint" =~ $os_VENDOR ]]; then - os_PACKAGE="deb" - elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then - lsb_release -d -s | grep -q openSUSE - if [[ $? 
-eq 0 ]]; then - os_VENDOR="openSUSE" - fi - elif [[ $os_VENDOR == "openSUSE project" ]]; then - os_VENDOR="openSUSE" - elif [[ $os_VENDOR =~ Red.*Hat ]]; then - os_VENDOR="Red Hat" - fi - os_CODENAME=$(lsb_release -c -s) - elif [[ -r /etc/redhat-release ]]; then - # Red Hat Enterprise Linux Server release 5.5 (Tikanga) - # Red Hat Enterprise Linux Server release 7.0 Beta (Maipo) - # CentOS release 5.5 (Final) - # CentOS Linux release 6.0 (Final) - # Fedora release 16 (Verne) - # XenServer release 6.2.0-70446c (xenenterprise) - os_CODENAME="" - for r in "Red Hat" CentOS Fedora XenServer; do - os_VENDOR=$r - if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then - ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release` - os_CODENAME=${ver#*|} - os_RELEASE=${ver%|*} - os_UPDATE=${os_RELEASE##*.} - os_RELEASE=${os_RELEASE%.*} - break - fi - os_VENDOR="" - done - os_PACKAGE="rpm" - elif [[ -r /etc/SuSE-release ]]; then - for r in openSUSE "SUSE Linux"; do - if [[ "$r" = "SUSE Linux" ]]; then - os_VENDOR="SUSE LINUX" - else - os_VENDOR=$r - fi - - if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then - os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'` - os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'` - os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'` - break - fi - os_VENDOR="" - done - os_PACKAGE="rpm" - # If lsb_release is not installed, we should be able to detect Debian OS - elif [[ -f /etc/debian_version ]] && [[ $(cat /proc/version) =~ "Debian" ]]; then - os_VENDOR="Debian" - os_PACKAGE="deb" - os_CODENAME=$(awk '/VERSION=/' /etc/os-release | sed 's/VERSION=//' | sed -r 's/\"|\(|\)//g' | awk '{print $2}') - os_RELEASE=$(awk '/VERSION_ID=/' /etc/os-release | sed 's/VERSION_ID=//' | sed 's/\"//g') - fi - export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME -} - - -# Translate the OS version values into common nomenclature -# Sets ``DISTRO`` from the ``os_*`` values -function 
GetDistro() { - GetOSVersion - if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then - # 'Everyone' refers to Ubuntu / Debian releases by the code name adjective - DISTRO=$os_CODENAME - elif [[ "$os_VENDOR" =~ (Fedora) ]]; then - # For Fedora, just use 'f' and the release - DISTRO="f$os_RELEASE" - elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then - DISTRO="opensuse-$os_RELEASE" - elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then - # For SLE, also use the service pack - if [[ -z "$os_UPDATE" ]]; then - DISTRO="sle${os_RELEASE}" - else - DISTRO="sle${os_RELEASE}sp${os_UPDATE}" - fi - elif [[ "$os_VENDOR" =~ (Red Hat) || "$os_VENDOR" =~ (CentOS) ]]; then - # Drop the . release as we assume it's compatible - DISTRO="rhel${os_RELEASE::1}" - elif [[ "$os_VENDOR" =~ (XenServer) ]]; then - DISTRO="xs$os_RELEASE" - else - # Catch-all for now is Vendor + Release + Update - DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE" - fi - export DISTRO -} - - -# Determine if current distribution is a Fedora-based distribution -# (Fedora, RHEL, CentOS, etc). -# is_fedora -function is_fedora { - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || [ "$os_VENDOR" = "CentOS" ] -} - - -# Determine if current distribution is a SUSE-based distribution -# (openSUSE, SLE). -# is_suse -function is_suse { - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - [ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ] -} - - -# Determine if current distribution is an Ubuntu-based distribution -# It will also detect non-Ubuntu but Debian-based distros -# is_ubuntu -function is_ubuntu { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - [ "$os_PACKAGE" = "deb" ] -} - - -# Exit after outputting a message about the distribution not being supported. 
-# exit_distro_not_supported [optional-string-telling-what-is-missing] -function exit_distro_not_supported { - if [[ -z "$DISTRO" ]]; then - GetDistro - fi - - if [ $# -gt 0 ]; then - die $LINENO "Support for $DISTRO is incomplete: no support for $@" - else - die $LINENO "Support for $DISTRO is incomplete." - fi -} - -# Utility function for checking machine architecture -# is_arch arch-type -function is_arch { - ARCH_TYPE=$1 - - [[ "$(uname -m)" == "$ARCH_TYPE" ]] -} - # Checks if installed Apache is <= given version # $1 = x.y.z (version string of Apache) function check_apache_version { @@ -570,488 +28,6 @@ function check_apache_version { expr "$version" '>=' $1 > /dev/null } -# git clone only if directory doesn't exist already. Since ``DEST`` might not -# be owned by the installation user, we create the directory and change the -# ownership to the proper user. -# Set global RECLONE=yes to simulate a clone when dest-dir exists -# Set global ERROR_ON_CLONE=True to abort execution with an error if the git repo -# does not exist (default is False, meaning the repo will be cloned). -# Uses global ``OFFLINE`` -# git_clone remote dest-dir branch -function git_clone { - GIT_REMOTE=$1 - GIT_DEST=$2 - GIT_REF=$3 - RECLONE=$(trueorfalse False $RECLONE) - - if [[ "$OFFLINE" = "True" ]]; then - echo "Running in offline mode, clones already exist" - # print out the results so we know what change was used in the logs - cd $GIT_DEST - git show --oneline | head -1 - return - fi - - if echo $GIT_REF | egrep -q "^refs"; then - # If our branch name is a gerrit style refs/changes/... - if [[ ! -d $GIT_DEST ]]; then - [[ "$ERROR_ON_CLONE" = "True" ]] && \ - die $LINENO "Cloning not allowed in this configuration" - git clone $GIT_REMOTE $GIT_DEST - fi - cd $GIT_DEST - git fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD - else - # do a full clone only if the directory doesn't exist - if [[ ! 
-d $GIT_DEST ]]; then - [[ "$ERROR_ON_CLONE" = "True" ]] && \ - die $LINENO "Cloning not allowed in this configuration" - git clone $GIT_REMOTE $GIT_DEST - cd $GIT_DEST - # This checkout syntax works for both branches and tags - git checkout $GIT_REF - elif [[ "$RECLONE" = "True" ]]; then - # if it does exist then simulate what clone does if asked to RECLONE - cd $GIT_DEST - # set the url to pull from and fetch - git remote set-url origin $GIT_REMOTE - git fetch origin - # remove the existing ignored files (like pyc) as they cause breakage - # (due to the py files having older timestamps than our pyc, so python - # thinks the pyc files are correct using them) - find $GIT_DEST -name '*.pyc' -delete - - # handle GIT_REF accordingly to type (tag, branch) - if [[ -n "`git show-ref refs/tags/$GIT_REF`" ]]; then - git_update_tag $GIT_REF - elif [[ -n "`git show-ref refs/heads/$GIT_REF`" ]]; then - git_update_branch $GIT_REF - elif [[ -n "`git show-ref refs/remotes/origin/$GIT_REF`" ]]; then - git_update_remote_branch $GIT_REF - else - die $LINENO "$GIT_REF is neither branch nor tag" - fi - - fi - fi - - # print out the results so we know what change was used in the logs - cd $GIT_DEST - git show --oneline | head -1 -} - - -# git update using reference as a branch. -# git_update_branch ref -function git_update_branch() { - - GIT_BRANCH=$1 - - git checkout -f origin/$GIT_BRANCH - # a local branch might not exist - git branch -D $GIT_BRANCH || true - git checkout -b $GIT_BRANCH -} - - -# git update using reference as a branch. -# git_update_remote_branch ref -function git_update_remote_branch() { - - GIT_BRANCH=$1 - - git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH -} - - -# git update using reference as a tag. 
Be careful editing source at that repo -# as working copy will be in a detached mode -# git_update_tag ref -function git_update_tag() { - - GIT_TAG=$1 - - git tag -d $GIT_TAG - # fetching given tag only - git fetch origin tag $GIT_TAG - git checkout -f $GIT_TAG -} - - -# Comment an option in an INI file -# inicomment config-file section option -function inicomment() { - local file=$1 - local section=$2 - local option=$3 - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" -} - - -# Uncomment an option in an INI file -# iniuncomment config-file section option -function iniuncomment() { - local file=$1 - local section=$2 - local option=$3 - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file" -} - - -# Get an option from an INI file -# iniget config-file section option -function iniget() { - local file=$1 - local section=$2 - local option=$3 - local line - line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") - echo ${line#*=} -} - - -# Determinate is the given option present in the INI file -# ini_has_option config-file section option -function ini_has_option() { - local file=$1 - local section=$2 - local option=$3 - local line - line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") - [ -n "$line" ] -} - - -# Set an option in an INI file -# iniset config-file section option value -function iniset() { - local file=$1 - local section=$2 - local option=$3 - local value=$4 - - [[ -z $section || -z $option ]] && return - - if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then - # Add section at the end - echo -e "\n[$section]" >>"$file" - fi - if ! 
ini_has_option "$file" "$section" "$option"; then - # Add it - sed -i -e "/^\[$section\]/ a\\ -$option = $value -" "$file" - else - local sep=$(echo -ne "\x01") - # Replace it - sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" - fi -} - - -# Get a multiple line option from an INI file -# iniget_multiline config-file section option -function iniget_multiline() { - local file=$1 - local section=$2 - local option=$3 - local values - values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file") - echo ${values} -} - - -# Set a multiple line option in an INI file -# iniset_multiline config-file section option value1 value2 valu3 ... -function iniset_multiline() { - local file=$1 - local section=$2 - local option=$3 - shift 3 - local values - for v in $@; do - # The later sed command inserts each new value in the line next to - # the section identifier, which causes the values to be inserted in - # the reverse order. Do a reverse here to keep the original order. - values="$v ${values}" - done - if ! grep -q "^\[$section\]" "$file"; then - # Add section at the end - echo -e "\n[$section]" >>"$file" - else - # Remove old values - sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file" - fi - # Add new ones - for v in $values; do - sed -i -e "/^\[$section\]/ a\\ -$option = $v -" "$file" - done -} - - -# Append a new option in an ini file without replacing the old value -# iniadd config-file section option value1 value2 value3 ... -function iniadd() { - local file=$1 - local section=$2 - local option=$3 - shift 3 - local values="$(iniget_multiline $file $section $option) $@" - iniset_multiline $file $section $option $values -} - -# Find out if a process exists by partial name. -# is_running name -function is_running() { - local name=$1 - ps auxw | grep -v grep | grep ${name} > /dev/null - RC=$? 
- # some times I really hate bash reverse binary logic - return $RC -} - - -# is_service_enabled() checks if the service(s) specified as arguments are -# enabled by the user in ``ENABLED_SERVICES``. -# -# Multiple services specified as arguments are ``OR``'ed together; the test -# is a short-circuit boolean, i.e it returns on the first match. -# -# There are special cases for some 'catch-all' services:: -# **nova** returns true if any service enabled start with **n-** -# **cinder** returns true if any service enabled start with **c-** -# **ceilometer** returns true if any service enabled start with **ceilometer** -# **glance** returns true if any service enabled start with **g-** -# **neutron** returns true if any service enabled start with **q-** -# **swift** returns true if any service enabled start with **s-** -# **trove** returns true if any service enabled start with **tr-** -# For backward compatibility if we have **swift** in ENABLED_SERVICES all the -# **s-** services will be enabled. This will be deprecated in the future. -# -# Cells within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``. -# We also need to make sure to treat **n-cell-region** and **n-cell-child** -# as enabled in this case. -# -# Uses global ``ENABLED_SERVICES`` -# is_service_enabled service [service ...] -function is_service_enabled() { - services=$@ - for service in ${services}; do - [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 - - # Look for top-level 'enabled' function for this service - if type is_${service}_enabled >/dev/null 2>&1; then - # A function exists for this service, use it - is_${service}_enabled - return $? 
- fi - - # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled() - # are implemented - [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0 - [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0 - [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0 - [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0 - [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 - [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && return 0 - [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 - [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0 - [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0 - [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0 - done - return 1 -} - - -# remove extra commas from the input string (i.e. ``ENABLED_SERVICES``) -# _cleanup_service_list service-list -function _cleanup_service_list () { - echo "$1" | sed -e ' - s/,,/,/g; - s/^,//; - s/,$// - ' -} - - -# enable_service() adds the services passed as argument to the -# ``ENABLED_SERVICES`` list, if they are not already present. -# -# For example: -# enable_service qpid -# -# This function does not know about the special cases -# for nova, glance, and neutron built into is_service_enabled(). -# Uses global ``ENABLED_SERVICES`` -# enable_service service [service ...] -function enable_service() { - local tmpsvcs="${ENABLED_SERVICES}" - for service in $@; do - if ! is_service_enabled $service; then - tmpsvcs+=",$service" - fi - done - ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") - disable_negated_services -} - - -# disable_service() removes the services passed as argument to the -# ``ENABLED_SERVICES`` list, if they are present. 
-# -# For example: -# disable_service rabbit -# -# This function does not know about the special cases -# for nova, glance, and neutron built into is_service_enabled(). -# Uses global ``ENABLED_SERVICES`` -# disable_service service [service ...] -function disable_service() { - local tmpsvcs=",${ENABLED_SERVICES}," - local service - for service in $@; do - if is_service_enabled $service; then - tmpsvcs=${tmpsvcs//,$service,/,} - fi - done - ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") -} - - -# disable_all_services() removes all current services -# from ``ENABLED_SERVICES`` to reset the configuration -# before a minimal installation -# Uses global ``ENABLED_SERVICES`` -# disable_all_services -function disable_all_services() { - ENABLED_SERVICES="" -} - - -# Remove all services starting with '-'. For example, to install all default -# services except rabbit (rabbit) set in ``localrc``: -# ENABLED_SERVICES+=",-rabbit" -# Uses global ``ENABLED_SERVICES`` -# disable_negated_services -function disable_negated_services() { - local tmpsvcs="${ENABLED_SERVICES}" - local service - for service in ${tmpsvcs//,/ }; do - if [[ ${service} == -* ]]; then - tmpsvcs=$(echo ${tmpsvcs}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g") - fi - done - ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") -} - - -# Distro-agnostic package installer -# install_package package [package ...] -function install_package() { - if is_ubuntu; then - [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update - NO_UPDATE_REPOS=True - - apt_get install "$@" - elif is_fedora; then - yum_install "$@" - elif is_suse; then - zypper_install "$@" - else - exit_distro_not_supported "installing packages" - fi -} - - -# Distro-agnostic package uninstaller -# uninstall_package package [package ...] 
-function uninstall_package() { - if is_ubuntu; then - apt_get purge "$@" - elif is_fedora; then - sudo yum remove -y "$@" - elif is_suse; then - sudo zypper rm "$@" - else - exit_distro_not_supported "uninstalling packages" - fi -} - - -# Distro-agnostic function to tell if a package is installed -# is_package_installed package [package ...] -function is_package_installed() { - if [[ -z "$@" ]]; then - return 1 - fi - - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - - if [[ "$os_PACKAGE" = "deb" ]]; then - dpkg -s "$@" > /dev/null 2> /dev/null - elif [[ "$os_PACKAGE" = "rpm" ]]; then - rpm --quiet -q "$@" - else - exit_distro_not_supported "finding if a package is installed" - fi -} - - -# Test if the named environment variable is set and not zero length -# is_set env-var -function is_set() { - local var=\$"$1" - eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depends on this -} - - -# Wrapper for ``pip install`` to set cache and proxy environment variables -# Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``PIP_USE_MIRRORS``, -# ``TRACK_DEPENDS``, ``*_proxy`` -# pip_install package [package ...] -function pip_install { - [[ "$OFFLINE" = "True" || -z "$@" ]] && return - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - if [[ $TRACK_DEPENDS = True ]]; then - source $DEST/.venv/bin/activate - CMD_PIP=$DEST/.venv/bin/pip - SUDO_PIP="env" - else - SUDO_PIP="sudo" - CMD_PIP=$(get_pip_command) - fi - - # Mirror option not needed anymore because pypi has CDN available, - # but it's useful in certain circumstances - PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False} - if [[ "$PIP_USE_MIRRORS" != "False" ]]; then - PIP_MIRROR_OPT="--use-mirrors" - fi - - # pip < 1.4 has a bug where it will use an already existing build - # directory unconditionally. Say an earlier component installs - # foo v1.1; pip will have built foo's source in - # /tmp/$USER-pip-build. 
Even if a later component specifies foo < - # 1.1, the existing extracted build will be used and cause - # confusing errors. By creating unique build directories we avoid - # this problem. See https://github.com/pypa/pip/issues/709 - local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX) - - $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \ - HTTP_PROXY=$http_proxy \ - HTTPS_PROXY=$https_proxy \ - NO_PROXY=$no_proxy \ - $CMD_PIP install --build=${pip_build_tmp} \ - $PIP_MIRROR_OPT $@ \ - && $SUDO_PIP rm -rf ${pip_build_tmp} -} - # Cleanup anything from /tmp on unstack # clean_tmp @@ -1062,243 +38,6 @@ function cleanup_tmp { sudo rm -rf ${tmp_dir}/pip-build.* } -# Service wrapper to restart services -# restart_service service-name -function restart_service() { - if is_ubuntu; then - sudo /usr/sbin/service $1 restart - else - sudo /sbin/service $1 restart - fi -} - - -# _run_process() is designed to be backgrounded by run_process() to simulate a -# fork. It includes the dirty work of closing extra filehandles and preparing log -# files to produce the same logs as screen_it(). The log filename is derived -# from the service name and global-and-now-misnamed SCREEN_LOGDIR -# _run_process service "command-line" -function _run_process() { - local service=$1 - local command="$2" - - # Undo logging redirections and close the extra descriptors - exec 1>&3 - exec 2>&3 - exec 3>&- - exec 6>&- - - if [[ -n ${SCREEN_LOGDIR} ]]; then - exec 1>&${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log 2>&1 - ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log - - # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs. - export PYTHONUNBUFFERED=1 - fi - - exec /bin/bash -c "$command" - die "$service exec failure: $command" -} - - -# run_process() launches a child process that closes all file descriptors and -# then exec's the passed in command. 
This is meant to duplicate the semantics -# of screen_it() without screen. PIDs are written to -# $SERVICE_DIR/$SCREEN_NAME/$service.pid -# run_process service "command-line" -function run_process() { - local service=$1 - local command="$2" - - # Spawn the child process - _run_process "$service" "$command" & - echo $! -} - - -# Helper to launch a service in a named screen -# screen_it service "command-line" -function screen_it { - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - USE_SCREEN=$(trueorfalse True $USE_SCREEN) - - if is_service_enabled $1; then - # Append the service to the screen rc file - screen_rc "$1" "$2" - - if [[ "$USE_SCREEN" = "True" ]]; then - screen -S $SCREEN_NAME -X screen -t $1 - - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log - screen -S $SCREEN_NAME -p $1 -X log on - ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log - fi - - # sleep to allow bash to be ready to be send the command - we are - # creating a new window in screen and then sends characters, so if - # bash isn't running by the time we send the command, nothing happens - sleep 1.5 - - NL=`echo -ne '\015'` - # This fun command does the following: - # - the passed server command is backgrounded - # - the pid of the background process is saved in the usual place - # - the server process is brought back to the foreground - # - if the server process exits prematurely the fg command errors - # and a message is written to stdout and the service failure file - # The pid saved can be used in screen_stop() as a process group - # id to kill off all child processes - screen -S $SCREEN_NAME -p $1 -X stuff "$2 & echo \$! 
>$SERVICE_DIR/$SCREEN_NAME/$1.pid; fg || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" - else - # Spawn directly without screen - run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid - fi - fi -} - - -# Stop a service in screen -# If a PID is available use it, kill the whole process group via TERM -# If screen is being used kill the screen window; this will catch processes -# that did not leave a PID behind -# screen_stop service -function screen_stop() { - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - USE_SCREEN=$(trueorfalse True $USE_SCREEN) - - if is_service_enabled $1; then - # Kill via pid if we have one available - if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then - pkill -TERM -P -$(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid) - rm $SERVICE_DIR/$SCREEN_NAME/$1.pid - fi - if [[ "$USE_SCREEN" = "True" ]]; then - # Clean up the screen window - screen -S $SCREEN_NAME -p $1 -X kill - fi - fi -} - - -# Screen rc file builder -# screen_rc service "command-line" -function screen_rc { - SCREEN_NAME=${SCREEN_NAME:-stack} - SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc - if [[ ! -e $SCREENRC ]]; then - # Name the screen session - echo "sessionname $SCREEN_NAME" > $SCREENRC - # Set a reasonable statusbar - echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC - # Some distributions override PROMPT_COMMAND for the screen terminal type - turn that off - echo "setenv PROMPT_COMMAND /bin/true" >> $SCREENRC - echo "screen -t shell bash" >> $SCREENRC - fi - # If this service doesn't already exist in the screenrc file - if ! 
grep $1 $SCREENRC 2>&1 > /dev/null; then - NL=`echo -ne '\015'` - echo "screen -t $1 bash" >> $SCREENRC - echo "stuff \"$2$NL\"" >> $SCREENRC - - if [[ -n ${SCREEN_LOGDIR} ]]; then - echo "logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log" >>$SCREENRC - echo "log on" >>$SCREENRC - fi - fi -} - - -# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``. -# This is used for ``service_check`` when all the ``screen_it`` are called finished -# init_service_check -function init_service_check() { - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - - if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then - mkdir -p "$SERVICE_DIR/$SCREEN_NAME" - fi - - rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure -} - - -# Helper to get the status of each running service -# service_check -function service_check() { - local service - local failures - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - - - if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then - echo "No service status directory found" - return - fi - - # Check if there is any falure flag file under $SERVICE_DIR/$SCREEN_NAME - failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null` - - for service in $failures; do - service=`basename $service` - service=${service%.failure} - echo "Error: Service $service is not running" - done - - if [ -n "$failures" ]; then - echo "More details about the above errors can be found with screen, with ./rejoin-stack.sh" - fi -} - -# Returns true if the directory is on a filesystem mounted via NFS. -function is_nfs_directory() { - local mount_type=`stat -f -L -c %T $1` - test "$mount_type" == "nfs" -} - -# Only run the command if the target file (the last arg) is not on an -# NFS filesystem. -function _safe_permission_operation() { - local args=( $@ ) - local last - local sudo_cmd - local dir_to_check - - let last="${#args[*]} - 1" - - dir_to_check=${args[$last]} - if [ ! 
-d "$dir_to_check" ]; then - dir_to_check=`dirname "$dir_to_check"` - fi - - if is_nfs_directory "$dir_to_check" ; then - return 0 - fi - - if [[ $TRACK_DEPENDS = True ]]; then - sudo_cmd="env" - else - sudo_cmd="sudo" - fi - - $sudo_cmd $@ -} - -# Only change ownership of a file or directory if it is not on an NFS -# filesystem. -function safe_chown() { - _safe_permission_operation chown $@ -} - -# Only change permissions of a file or directory if it is not on an -# NFS filesystem. -function safe_chmod() { - _safe_permission_operation chmod $@ -} # ``pip install -e`` the package, which processes the dependencies # using pip before running `setup.py develop` @@ -1340,6 +79,7 @@ function setup_develop() { fi } + # ``pip install -e`` the package, which processes the dependencies # using pip before running `setup.py develop` # Uses globals ``STACK_USER`` @@ -1353,43 +93,6 @@ function setup_develop_no_requirements_update() { } -# Service wrapper to start services -# start_service service-name -function start_service() { - if is_ubuntu; then - sudo /usr/sbin/service $1 start - else - sudo /sbin/service $1 start - fi -} - - -# Service wrapper to stop services -# stop_service service-name -function stop_service() { - if is_ubuntu; then - sudo /usr/sbin/service $1 stop - else - sudo /sbin/service $1 stop - fi -} - - -# Normalize config values to True or False -# Accepts as False: 0 no No NO false False FALSE -# Accepts as True: 1 yes Yes YES true True TRUE -# VAR=$(trueorfalse default-value test-value) -function trueorfalse() { - local default=$1 - local testval=$2 - - [[ -z "$testval" ]] && { echo "$default"; return; } - [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; } - [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; } - echo "$default" -} - - # Retrieve an image from a URL and upload into Glance. 
# Uses the following variables: # @@ -1685,23 +388,6 @@ function use_database { } -# Toggle enable/disable_service for services that must run exclusive of each other -# $1 The name of a variable containing a space-separated list of services -# $2 The name of a variable in which to store the enabled service's name -# $3 The name of the service to enable -function use_exclusive_service { - local options=${!1} - local selection=$3 - out=$2 - [ -z $selection ] || [[ ! "$options" =~ "$selection" ]] && return 1 - for opt in $options;do - [[ "$opt" = "$selection" ]] && enable_service $opt || disable_service $opt - done - eval "$out=$selection" - return 0 -} - - # Wait for an HTTP server to start answering requests # wait_for_service timeout url function wait_for_service() { @@ -1711,30 +397,6 @@ function wait_for_service() { } -# Wrapper for ``yum`` to set proxy environment variables -# Uses globals ``OFFLINE``, ``*_proxy`` -# yum_install package [package ...] -function yum_install() { - [[ "$OFFLINE" = "True" ]] && return - local sudo="sudo" - [[ "$(id -u)" = "0" ]] && sudo="env" - $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ - no_proxy=$no_proxy \ - yum install -y "$@" -} - - -# zypper wrapper to set arguments correctly -# zypper_install package [package ...] -function zypper_install() { - [[ "$OFFLINE" = "True" ]] && return - local sudo="sudo" - [[ "$(id -u)" = "0" ]] && sudo="env" - $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ - zypper --non-interactive install --auto-agree-with-licenses "$@" -} - - # ping check # Uses globals ``ENABLED_SERVICES`` # ping_check from-net ip boot-timeout expected @@ -1809,36 +471,6 @@ function _ssh_check_novanet() { } -# Add a user to a group. -# add_user_to_group user group -function add_user_to_group() { - local user=$1 - local group=$2 - - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - # SLE11 and openSUSE 12.2 don't have the usual usermod - if ! 
is_suse || [[ "$os_VENDOR" = "openSUSE" && "$os_RELEASE" != "12.2" ]]; then - sudo usermod -a -G "$group" "$user" - else - sudo usermod -A "$group" "$user" - fi -} - - -# Get the path to the direcotry where python executables are installed. -# get_python_exec_prefix -function get_python_exec_prefix() { - if is_fedora || is_suse; then - echo "/usr/bin" - else - echo "/usr/local/bin" - fi -} - - # Get the location of the $module-rootwrap executables, where module is cinder # or nova. # get_rootwrap_location module @@ -1849,17 +481,6 @@ function get_rootwrap_location() { } -# Get the path to the pip command. -# get_pip_command -function get_pip_command() { - which pip || which pip-python - - if [ $? -ne 0 ]; then - die $LINENO "Unable to find pip; cannot continue" - fi -} - - # Path permissions sanity check # check_path_perm_sanity path function check_path_perm_sanity() { @@ -1944,37 +565,6 @@ vercmp_numbers() { } -# ``policy_add policy_file policy_name policy_permissions`` -# -# Add a policy to a policy.json file -# Do nothing if the policy already exists - -function policy_add() { - local policy_file=$1 - local policy_name=$2 - local policy_perm=$3 - - if grep -q ${policy_name} ${policy_file}; then - echo "Policy ${policy_name} already exists in ${policy_file}" - return - fi - - # Add a terminating comma to policy lines without one - # Remove the closing '}' and all lines following to the end-of-file - local tmpfile=$(mktemp) - uniq ${policy_file} | sed -e ' - s/]$/],/ - /^[}]/,$d - ' > ${tmpfile} - - # Append policy and closing brace - echo " \"${policy_name}\": ${policy_perm}" >>${tmpfile} - echo "}" >>${tmpfile} - - mv ${tmpfile} ${policy_file} -} - - # This function sets log formatting options for colorizing log # output to stdout. It is meant to be called by lib modules. 
# The last two parameters are optional and can be used to specify @@ -1994,10 +584,10 @@ function setup_colorized_logging() { iniset $conf_file $conf_section logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" } + # Restore xtrace $XTRACE - # Local variables: # mode: shell-script # End: diff --git a/functions-common b/functions-common new file mode 100644 index 0000000000..0cecb0b9fb --- /dev/null +++ b/functions-common @@ -0,0 +1,1433 @@ +# functions-common - Common functions used by DevStack components +# +# The canonical copy of this file is maintained in the DevStack repo. +# All modifications should be made there and then sync'ed to other repos +# as required. +# +# This file is sorted alphabetically within the function groups. +# +# - Config Functions +# - Control Functions +# - Distro Functions +# - Git Functions +# - OpenStack Functions +# - Package Functions +# - Process Functions +# - Python Functions +# - Service Functions +# +# The following variables are assumed to be defined by certain functions: +# +# - ``ENABLED_SERVICES`` +# - ``ERROR_ON_CLONE`` +# - ``FILES`` +# - ``OFFLINE`` +# - ``PIP_DOWNLOAD_CACHE`` +# - ``PIP_USE_MIRRORS`` +# - ``RECLONE`` +# - ``TRACK_DEPENDS`` +# - ``http_proxy``, ``https_proxy``, ``no_proxy`` + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Config Functions +# ================ + +# Append a new option in an ini file without replacing the old value +# iniadd config-file section option value1 value2 value3 ... 
+function iniadd() { + local file=$1 + local section=$2 + local option=$3 + shift 3 + local values="$(iniget_multiline $file $section $option) $@" + iniset_multiline $file $section $option $values +} + +# Comment an option in an INI file +# inicomment config-file section option +function inicomment() { + local file=$1 + local section=$2 + local option=$3 + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" +} + +# Get an option from an INI file +# iniget config-file section option +function iniget() { + local file=$1 + local section=$2 + local option=$3 + local line + line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + echo ${line#*=} +} + +# Get a multiple line option from an INI file +# iniget_multiline config-file section option +function iniget_multiline() { + local file=$1 + local section=$2 + local option=$3 + local values + values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file") + echo ${values} +} + +# Determine if the given option is present in the INI file +# ini_has_option config-file section option
function ini_has_option() { + local file=$1 + local section=$2 + local option=$3 + local line + line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + [ -n "$line" ] +} + +# Set an option in an INI file
# iniset config-file section option value +function iniset() { + local file=$1 + local section=$2 + local option=$3 + local value=$4 + + [[ -z $section || -z $option ]] && return + + if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then + # Add section at the end + echo -e "\n[$section]" >>"$file" + fi + if !
ini_has_option "$file" "$section" "$option"; then + # Add it + sed -i -e "/^\[$section\]/ a\\ +$option = $value +" "$file" + else + local sep=$(echo -ne "\x01") + # Replace it + sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" + fi +} + +# Set a multiple line option in an INI file +# iniset_multiline config-file section option value1 value2 value3 ... +function iniset_multiline() { + local file=$1 + local section=$2 + local option=$3 + shift 3 + local values + for v in $@; do + # The later sed command inserts each new value in the line next to + # the section identifier, which causes the values to be inserted in + # the reverse order. Do a reverse here to keep the original order. + values="$v ${values}" + done + if ! grep -q "^\[$section\]" "$file"; then + # Add section at the end + echo -e "\n[$section]" >>"$file" + else + # Remove old values + sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file" + fi + # Add new ones + for v in $values; do + sed -i -e "/^\[$section\]/ a\\ +$option = $v +" "$file" + done +} + +# Uncomment an option in an INI file +# iniuncomment config-file section option +function iniuncomment() { + local file=$1 + local section=$2 + local option=$3 + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file" +} + +# Normalize config values to True or False +# Accepts as False: 0 no No NO false False FALSE +# Accepts as True: 1 yes Yes YES true True TRUE +# VAR=$(trueorfalse default-value test-value) +function trueorfalse() { + local default=$1 + local testval=$2 + + [[ -z "$testval" ]] && { echo "$default"; return; } + [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; } + [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; } + echo "$default" +} + + +# Control Functions +# ================= + +# Prints backtrace info +# filename:lineno:function +# backtrace level +function
backtrace { + local level=$1 + local deep=$((${#BASH_SOURCE[@]} - 1)) + echo "[Call Trace]" + while [ $level -le $deep ]; do + echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}" + deep=$((deep - 1)) + done +} + +# Prints line number and "message" then exits +# die $LINENO "message" +function die() { + local exitcode=$? + set +o xtrace + local line=$1; shift + if [ $exitcode == 0 ]; then + exitcode=1 + fi + backtrace 2 + err $line "$*" + exit $exitcode +} + +# Checks an environment variable is not set or has length 0 OR if the +# exit code is non-zero and prints "message" and exits +# NOTE: env-var is the variable name without a '$' +# die_if_not_set $LINENO env-var "message" +function die_if_not_set() { + local exitcode=$? + FXTRACE=$(set +o | grep xtrace) + set +o xtrace + local line=$1; shift + local evar=$1; shift + if ! is_set $evar || [ $exitcode != 0 ]; then + die $line "$*" + fi + $FXTRACE +} + +# Prints line number and "message" in error format +# err $LINENO "message" +function err() { + local exitcode=$? + errXTRACE=$(set +o | grep xtrace) + set +o xtrace + local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2" + echo $msg 1>&2; + if [[ -n ${SCREEN_LOGDIR} ]]; then + echo $msg >> "${SCREEN_LOGDIR}/error.log" + fi + $errXTRACE + return $exitcode +} + +# Checks an environment variable is not set or has length 0 OR if the +# exit code is non-zero and prints "message" +# NOTE: env-var is the variable name without a '$' +# err_if_not_set $LINENO env-var "message" +function err_if_not_set() { + local exitcode=$? + errinsXTRACE=$(set +o | grep xtrace) + set +o xtrace + local line=$1; shift + local evar=$1; shift + if ! is_set $evar || [ $exitcode != 0 ]; then + err $line "$*" + fi + $errinsXTRACE + return $exitcode +} + +# Exit after outputting a message about the distribution not being supported. 
+# exit_distro_not_supported [optional-string-telling-what-is-missing] +function exit_distro_not_supported { + if [[ -z "$DISTRO" ]]; then + GetDistro + fi + + if [ $# -gt 0 ]; then + die $LINENO "Support for $DISTRO is incomplete: no support for $@" + else + die $LINENO "Support for $DISTRO is incomplete." + fi +} + +# Test if the named environment variable is set and not zero length +# is_set env-var +function is_set() { + local var=\$"$1" + eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depends on this +} + +# Prints line number and "message" in warning format +# warn $LINENO "message" +function warn() { + local exitcode=$? + errXTRACE=$(set +o | grep xtrace) + set +o xtrace + local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2" + echo $msg 1>&2; + if [[ -n ${SCREEN_LOGDIR} ]]; then + echo $msg >> "${SCREEN_LOGDIR}/error.log" + fi + $errXTRACE + return $exitcode +} + + +# Distro Functions +# ================ + +# Determine OS Vendor, Release and Update +# Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora +# Returns results in global variables: +# os_VENDOR - vendor name +# os_RELEASE - release +# os_UPDATE - update +# os_PACKAGE - package type +# os_CODENAME - vendor's codename for release +# GetOSVersion +GetOSVersion() { + # Figure out which vendor we are + if [[ -x "`which sw_vers 2>/dev/null`" ]]; then + # OS/X + os_VENDOR=`sw_vers -productName` + os_RELEASE=`sw_vers -productVersion` + os_UPDATE=${os_RELEASE##*.} + os_RELEASE=${os_RELEASE%.*} + os_PACKAGE="" + if [[ "$os_RELEASE" =~ "10.7" ]]; then + os_CODENAME="lion" + elif [[ "$os_RELEASE" =~ "10.6" ]]; then + os_CODENAME="snow leopard" + elif [[ "$os_RELEASE" =~ "10.5" ]]; then + os_CODENAME="leopard" + elif [[ "$os_RELEASE" =~ "10.4" ]]; then + os_CODENAME="tiger" + elif [[ "$os_RELEASE" =~ "10.3" ]]; then + os_CODENAME="panther" + else + os_CODENAME="" + fi + elif [[ -x $(which lsb_release 2>/dev/null) ]]; then + os_VENDOR=$(lsb_release -i -s) + 
os_RELEASE=$(lsb_release -r -s) + os_UPDATE="" + os_PACKAGE="rpm" + if [[ "Debian,Ubuntu,LinuxMint" =~ $os_VENDOR ]]; then + os_PACKAGE="deb" + elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then + lsb_release -d -s | grep -q openSUSE + if [[ $? -eq 0 ]]; then + os_VENDOR="openSUSE" + fi + elif [[ $os_VENDOR == "openSUSE project" ]]; then + os_VENDOR="openSUSE" + elif [[ $os_VENDOR =~ Red.*Hat ]]; then + os_VENDOR="Red Hat" + fi + os_CODENAME=$(lsb_release -c -s) + elif [[ -r /etc/redhat-release ]]; then + # Red Hat Enterprise Linux Server release 5.5 (Tikanga) + # Red Hat Enterprise Linux Server release 7.0 Beta (Maipo) + # CentOS release 5.5 (Final) + # CentOS Linux release 6.0 (Final) + # Fedora release 16 (Verne) + # XenServer release 6.2.0-70446c (xenenterprise) + os_CODENAME="" + for r in "Red Hat" CentOS Fedora XenServer; do + os_VENDOR=$r + if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then + ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release` + os_CODENAME=${ver#*|} + os_RELEASE=${ver%|*} + os_UPDATE=${os_RELEASE##*.} + os_RELEASE=${os_RELEASE%.*} + break + fi + os_VENDOR="" + done + os_PACKAGE="rpm" + elif [[ -r /etc/SuSE-release ]]; then + for r in openSUSE "SUSE Linux"; do + if [[ "$r" = "SUSE Linux" ]]; then + os_VENDOR="SUSE LINUX" + else + os_VENDOR=$r + fi + + if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then + os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'` + os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'` + os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'` + break + fi + os_VENDOR="" + done + os_PACKAGE="rpm" + # If lsb_release is not installed, we should be able to detect Debian OS + elif [[ -f /etc/debian_version ]] && [[ $(cat /proc/version) =~ "Debian" ]]; then + os_VENDOR="Debian" + os_PACKAGE="deb" + os_CODENAME=$(awk '/VERSION=/' /etc/os-release | sed 's/VERSION=//' | sed -r 's/\"|\(|\)//g' | awk '{print $2}') + os_RELEASE=$(awk '/VERSION_ID=/' /etc/os-release | 
sed 's/VERSION_ID=//' | sed 's/\"//g') + fi + export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME +} + +# Translate the OS version values into common nomenclature +# Sets global ``DISTRO`` from the ``os_*`` values +function GetDistro() { + GetOSVersion + if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then + # 'Everyone' refers to Ubuntu / Debian releases by the code name adjective + DISTRO=$os_CODENAME + elif [[ "$os_VENDOR" =~ (Fedora) ]]; then + # For Fedora, just use 'f' and the release + DISTRO="f$os_RELEASE" + elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then + DISTRO="opensuse-$os_RELEASE" + elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then + # For SLE, also use the service pack + if [[ -z "$os_UPDATE" ]]; then + DISTRO="sle${os_RELEASE}" + else + DISTRO="sle${os_RELEASE}sp${os_UPDATE}" + fi + elif [[ "$os_VENDOR" =~ (Red Hat) || "$os_VENDOR" =~ (CentOS) ]]; then + # Drop the . release as we assume it's compatible + DISTRO="rhel${os_RELEASE::1}" + elif [[ "$os_VENDOR" =~ (XenServer) ]]; then + DISTRO="xs$os_RELEASE" + else + # Catch-all for now is Vendor + Release + Update + DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE" + fi + export DISTRO +} + +# Utility function for checking machine architecture +# is_arch arch-type +function is_arch { + ARCH_TYPE=$1 + + [[ "$(uname -m)" == "$ARCH_TYPE" ]] +} + +# Determine if current distribution is a Fedora-based distribution +# (Fedora, RHEL, CentOS, etc). +# is_fedora +function is_fedora { + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || [ "$os_VENDOR" = "CentOS" ] +} + + +# Determine if current distribution is a SUSE-based distribution +# (openSUSE, SLE). 
+# is_suse +function is_suse { + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + [ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ] +} + + +# Determine if current distribution is an Ubuntu-based distribution +# It will also detect non-Ubuntu but Debian-based distros +# is_ubuntu +function is_ubuntu { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + [ "$os_PACKAGE" = "deb" ] +} + + +# Git Functions +# ============= + +# git clone only if directory doesn't exist already. Since ``DEST`` might not +# be owned by the installation user, we create the directory and change the +# ownership to the proper user. +# Set global RECLONE=yes to simulate a clone when dest-dir exists +# Set global ERROR_ON_CLONE=True to abort execution with an error if the git repo +# does not exist (default is False, meaning the repo will be cloned). +# Uses global ``OFFLINE`` +# git_clone remote dest-dir branch +function git_clone { + GIT_REMOTE=$1 + GIT_DEST=$2 + GIT_REF=$3 + RECLONE=$(trueorfalse False $RECLONE) + + if [[ "$OFFLINE" = "True" ]]; then + echo "Running in offline mode, clones already exist" + # print out the results so we know what change was used in the logs + cd $GIT_DEST + git show --oneline | head -1 + return + fi + + if echo $GIT_REF | egrep -q "^refs"; then + # If our branch name is a gerrit style refs/changes/... + if [[ ! -d $GIT_DEST ]]; then + [[ "$ERROR_ON_CLONE" = "True" ]] && \ + die $LINENO "Cloning not allowed in this configuration" + git clone $GIT_REMOTE $GIT_DEST + fi + cd $GIT_DEST + git fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD + else + # do a full clone only if the directory doesn't exist + if [[ ! 
-d $GIT_DEST ]]; then + [[ "$ERROR_ON_CLONE" = "True" ]] && \ + die $LINENO "Cloning not allowed in this configuration" + git clone $GIT_REMOTE $GIT_DEST + cd $GIT_DEST + # This checkout syntax works for both branches and tags + git checkout $GIT_REF + elif [[ "$RECLONE" = "True" ]]; then + # if it does exist then simulate what clone does if asked to RECLONE + cd $GIT_DEST + # set the url to pull from and fetch + git remote set-url origin $GIT_REMOTE + git fetch origin + # remove the existing ignored files (like pyc) as they cause breakage + # (due to the py files having older timestamps than our pyc, so python + # thinks the pyc files are correct using them) + find $GIT_DEST -name '*.pyc' -delete + + # handle GIT_REF accordingly to type (tag, branch) + if [[ -n "`git show-ref refs/tags/$GIT_REF`" ]]; then + git_update_tag $GIT_REF + elif [[ -n "`git show-ref refs/heads/$GIT_REF`" ]]; then + git_update_branch $GIT_REF + elif [[ -n "`git show-ref refs/remotes/origin/$GIT_REF`" ]]; then + git_update_remote_branch $GIT_REF + else + die $LINENO "$GIT_REF is neither branch nor tag" + fi + + fi + fi + + # print out the results so we know what change was used in the logs + cd $GIT_DEST + git show --oneline | head -1 +} + +# git update using reference as a branch. +# git_update_branch ref +function git_update_branch() { + + GIT_BRANCH=$1 + + git checkout -f origin/$GIT_BRANCH + # a local branch might not exist + git branch -D $GIT_BRANCH || true + git checkout -b $GIT_BRANCH +} + +# git update using reference as a branch. +# git_update_remote_branch ref +function git_update_remote_branch() { + + GIT_BRANCH=$1 + + git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH +} + +# git update using reference as a tag. 
Be careful editing source at that repo +# as working copy will be in a detached mode +# git_update_tag ref +function git_update_tag() { + + GIT_TAG=$1 + + git tag -d $GIT_TAG + # fetching given tag only + git fetch origin tag $GIT_TAG + git checkout -f $GIT_TAG +} + + +# OpenStack Functions +# =================== + +# Get the default value for HOST_IP +# get_default_host_ip fixed_range floating_range host_ip_iface host_ip +function get_default_host_ip() { + local fixed_range=$1 + local floating_range=$2 + local host_ip_iface=$3 + local host_ip=$4 + + # Find the interface used for the default route + host_ip_iface=${host_ip_iface:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)} + # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable + if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then + host_ip="" + host_ips=`LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/"); print parts[1]}'` + for IP in $host_ips; do + # Attempt to filter out IP addresses that are part of the fixed and + # floating range. Note that this method only works if the ``netaddr`` + # python library is installed. If it is not installed, an error + # will be printed and the first IP from the interface will be used. + # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct + # address. + if ! (address_in_net $IP $fixed_range || address_in_net $IP $floating_range); then + host_ip=$IP + break; + fi + done + fi + echo $host_ip +} + +# Grab a numbered field from python prettytable output +# Fields are numbered starting with 1 +# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc. 
+# get_field field-number +function get_field() { + while read data; do + if [ "$1" -lt 0 ]; then + field="(\$(NF$1))" + else + field="\$$(($1 + 1))" + fi + echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}" + done +} + +# Add a policy to a policy.json file +# Do nothing if the policy already exists +# ``policy_add policy_file policy_name policy_permissions`` +function policy_add() { + local policy_file=$1 + local policy_name=$2 + local policy_perm=$3 + + if grep -q ${policy_name} ${policy_file}; then + echo "Policy ${policy_name} already exists in ${policy_file}" + return + fi + + # Add a terminating comma to policy lines without one + # Remove the closing '}' and all lines following to the end-of-file + local tmpfile=$(mktemp) + uniq ${policy_file} | sed -e ' + s/]$/],/ + /^[}]/,$d + ' > ${tmpfile} + + # Append policy and closing brace + echo " \"${policy_name}\": ${policy_perm}" >>${tmpfile} + echo "}" >>${tmpfile} + + mv ${tmpfile} ${policy_file} +} + + +# Package Functions +# ================= + +# _get_package_dir +function _get_package_dir() { + local pkg_dir + if is_ubuntu; then + pkg_dir=$FILES/apts + elif is_fedora; then + pkg_dir=$FILES/rpms + elif is_suse; then + pkg_dir=$FILES/rpms-suse + else + exit_distro_not_supported "list of packages" + fi + echo "$pkg_dir" +} + +# Wrapper for ``apt-get`` to set cache and proxy environment variables +# Uses globals ``OFFLINE``, ``*_proxy`` +# apt_get operation package [package ...] +function apt_get() { + [[ "$OFFLINE" = "True" || -z "$@" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + $sudo DEBIAN_FRONTEND=noninteractive \ + http_proxy=$http_proxy https_proxy=$https_proxy \ + no_proxy=$no_proxy \ + apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" +} + +# get_packages() collects a list of package names of any type from the +# prerequisite files in ``files/{apts|rpms}``. The list is intended +# to be passed to a package installer such as apt or yum. 
+# +# Only packages required for the services in 1st argument will be +# included. Two bits of metadata are recognized in the prerequisite files: +# +# - ``# NOPRIME`` defers installation to be performed later in `stack.sh` +# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection +# of the package to the distros listed. The distro names are case insensitive. +function get_packages() { + local services=$@ + local package_dir=$(_get_package_dir) + local file_to_parse + local service + + if [[ -z "$package_dir" ]]; then + echo "No package directory supplied" + return 1 + fi + if [[ -z "$DISTRO" ]]; then + GetDistro + fi + for service in ${services//,/ }; do + # Allow individual services to specify dependencies + if [[ -e ${package_dir}/${service} ]]; then + file_to_parse="${file_to_parse} $service" + fi + # NOTE(sdague) n-api needs glance for now because that's where + # glance client is + if [[ $service == n-api ]]; then + if [[ ! $file_to_parse =~ nova ]]; then + file_to_parse="${file_to_parse} nova" + fi + if [[ ! $file_to_parse =~ glance ]]; then + file_to_parse="${file_to_parse} glance" + fi + elif [[ $service == c-* ]]; then + if [[ ! $file_to_parse =~ cinder ]]; then + file_to_parse="${file_to_parse} cinder" + fi + elif [[ $service == ceilometer-* ]]; then + if [[ ! $file_to_parse =~ ceilometer ]]; then + file_to_parse="${file_to_parse} ceilometer" + fi + elif [[ $service == s-* ]]; then + if [[ ! $file_to_parse =~ swift ]]; then + file_to_parse="${file_to_parse} swift" + fi + elif [[ $service == n-* ]]; then + if [[ ! $file_to_parse =~ nova ]]; then + file_to_parse="${file_to_parse} nova" + fi + elif [[ $service == g-* ]]; then + if [[ ! $file_to_parse =~ glance ]]; then + file_to_parse="${file_to_parse} glance" + fi + elif [[ $service == key* ]]; then + if [[ ! $file_to_parse =~ keystone ]]; then + file_to_parse="${file_to_parse} keystone" + fi + elif [[ $service == q-* ]]; then + if [[ ! 
$file_to_parse =~ neutron ]]; then + file_to_parse="${file_to_parse} neutron" + fi + fi + done + + for file in ${file_to_parse}; do + local fname=${package_dir}/${file} + local OIFS line package distros distro + [[ -e $fname ]] || continue + + OIFS=$IFS + IFS=$'\n' + for line in $(<${fname}); do + if [[ $line =~ "NOPRIME" ]]; then + continue + fi + + # Assume we want this package + package=${line%#*} + inst_pkg=1 + + # Look for # dist:xxx in comment + if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then + # We are using BASH regexp matching feature. + package=${BASH_REMATCH[1]} + distros=${BASH_REMATCH[2]} + # In bash ${VAR,,} will lowercase VAR + # Look for a match in the distro list + if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then + # If no match then skip this package + inst_pkg=0 + fi + fi + + # Look for # testonly in comment + if [[ $line =~ (.*)#.*testonly.* ]]; then + package=${BASH_REMATCH[1]} + # Are we installing test packages? (test for the default value) + if [[ $INSTALL_TESTONLY_PACKAGES = "False" ]]; then + # If not installing test packages then skip this package + inst_pkg=0 + fi + fi + + if [[ $inst_pkg = 1 ]]; then + echo $package + fi + done + IFS=$OIFS + done +} + +# Distro-agnostic package installer +# install_package package [package ...] +function install_package() { + if is_ubuntu; then + [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update + NO_UPDATE_REPOS=True + + apt_get install "$@" + elif is_fedora; then + yum_install "$@" + elif is_suse; then + zypper_install "$@" + else + exit_distro_not_supported "installing packages" + fi +} + +# Distro-agnostic function to tell if a package is installed +# is_package_installed package [package ...]
+function is_package_installed() { + if [[ -z "$@" ]]; then + return 1 + fi + + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + + if [[ "$os_PACKAGE" = "deb" ]]; then + dpkg -s "$@" > /dev/null 2> /dev/null + elif [[ "$os_PACKAGE" = "rpm" ]]; then + rpm --quiet -q "$@" + else + exit_distro_not_supported "finding if a package is installed" + fi +} + +# Distro-agnostic package uninstaller +# uninstall_package package [package ...] +function uninstall_package() { + if is_ubuntu; then + apt_get purge "$@" + elif is_fedora; then + sudo yum remove -y "$@" + elif is_suse; then + sudo zypper rm "$@" + else + exit_distro_not_supported "uninstalling packages" + fi +} + +# Wrapper for ``yum`` to set proxy environment variables +# Uses globals ``OFFLINE``, ``*_proxy`` +# yum_install package [package ...] +function yum_install() { + [[ "$OFFLINE" = "True" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ + no_proxy=$no_proxy \ + yum install -y "$@" +} + +# zypper wrapper to set arguments correctly +# zypper_install package [package ...] +function zypper_install() { + [[ "$OFFLINE" = "True" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ + zypper --non-interactive install --auto-agree-with-licenses "$@" +} + + +# Process Functions +# ================= + +# _run_process() is designed to be backgrounded by run_process() to simulate a +# fork. It includes the dirty work of closing extra filehandles and preparing log +# files to produce the same logs as screen_it(). 
The log filename is derived +# from the service name and global-and-now-misnamed SCREEN_LOGDIR +# _run_process service "command-line" +function _run_process() { + local service=$1 + local command="$2" + + # Undo logging redirections and close the extra descriptors + exec 1>&3 + exec 2>&3 + exec 3>&- + exec 6>&- + + if [[ -n ${SCREEN_LOGDIR} ]]; then + exec 1>&${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log 2>&1 + ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log + + # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs. + export PYTHONUNBUFFERED=1 + fi + + exec /bin/bash -c "$command" + die "$service exec failure: $command" +} + +# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``. +# This is used for ``service_check`` when all the ``screen_it`` are called finished +# init_service_check +function init_service_check() { + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + + if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then + mkdir -p "$SERVICE_DIR/$SCREEN_NAME" + fi + + rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure +} + +# Find out if a process exists by partial name. +# is_running name +function is_running() { + local name=$1 + ps auxw | grep -v grep | grep ${name} > /dev/null + RC=$? + # some times I really hate bash reverse binary logic + return $RC +} + +# run_process() launches a child process that closes all file descriptors and +# then exec's the passed in command. This is meant to duplicate the semantics +# of screen_it() without screen. PIDs are written to +# $SERVICE_DIR/$SCREEN_NAME/$service.pid +# run_process service "command-line" +function run_process() { + local service=$1 + local command="$2" + + # Spawn the child process + _run_process "$service" "$command" & + echo $! 
+} + +# Helper to launch a service in a named screen +# screen_it service "command-line" +function screen_it { + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + USE_SCREEN=$(trueorfalse True $USE_SCREEN) + + if is_service_enabled $1; then + # Append the service to the screen rc file + screen_rc "$1" "$2" + + if [[ "$USE_SCREEN" = "True" ]]; then + screen -S $SCREEN_NAME -X screen -t $1 + + if [[ -n ${SCREEN_LOGDIR} ]]; then + screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log + screen -S $SCREEN_NAME -p $1 -X log on + ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log + fi + + # sleep to allow bash to be ready to be send the command - we are + # creating a new window in screen and then sends characters, so if + # bash isn't running by the time we send the command, nothing happens + sleep 1.5 + + NL=`echo -ne '\015'` + # This fun command does the following: + # - the passed server command is backgrounded + # - the pid of the background process is saved in the usual place + # - the server process is brought back to the foreground + # - if the server process exits prematurely the fg command errors + # and a message is written to stdout and the service failure file + # The pid saved can be used in screen_stop() as a process group + # id to kill off all child processes + screen -S $SCREEN_NAME -p $1 -X stuff "$2 & echo \$! >$SERVICE_DIR/$SCREEN_NAME/$1.pid; fg || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" + else + # Spawn directly without screen + run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid + fi + fi +} + +# Screen rc file builder +# screen_rc service "command-line" +function screen_rc { + SCREEN_NAME=${SCREEN_NAME:-stack} + SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc + if [[ ! 
-e $SCREENRC ]]; then + # Name the screen session + echo "sessionname $SCREEN_NAME" > $SCREENRC + # Set a reasonable statusbar + echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC + # Some distributions override PROMPT_COMMAND for the screen terminal type - turn that off + echo "setenv PROMPT_COMMAND /bin/true" >> $SCREENRC + echo "screen -t shell bash" >> $SCREENRC + fi + # If this service doesn't already exist in the screenrc file + if ! grep $1 $SCREENRC 2>&1 > /dev/null; then + NL=`echo -ne '\015'` + echo "screen -t $1 bash" >> $SCREENRC + echo "stuff \"$2$NL\"" >> $SCREENRC + + if [[ -n ${SCREEN_LOGDIR} ]]; then + echo "logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log" >>$SCREENRC + echo "log on" >>$SCREENRC + fi + fi +} + +# Stop a service in screen +# If a PID is available use it, kill the whole process group via TERM +# If screen is being used kill the screen window; this will catch processes +# that did not leave a PID behind +# screen_stop service +function screen_stop() { + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + USE_SCREEN=$(trueorfalse True $USE_SCREEN) + + if is_service_enabled $1; then + # Kill via pid if we have one available + if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then + pkill -TERM -P -$(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid) + rm $SERVICE_DIR/$SCREEN_NAME/$1.pid + fi + if [[ "$USE_SCREEN" = "True" ]]; then + # Clean up the screen window + screen -S $SCREEN_NAME -p $1 -X kill + fi + fi +} + +# Helper to get the status of each running service +# service_check +function service_check() { + local service + local failures + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + + + if [[ ! 
-d "$SERVICE_DIR/$SCREEN_NAME" ]]; then + echo "No service status directory found" + return + fi + + # Check if there is any failure flag file under $SERVICE_DIR/$SCREEN_NAME + failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null` + + for service in $failures; do + service=`basename $service` + service=${service%.failure} + echo "Error: Service $service is not running" + done + + if [ -n "$failures" ]; then + echo "More details about the above errors can be found with screen, with ./rejoin-stack.sh" + fi +} + + +# Python Functions +# ================ + +# Get the path to the pip command. +# get_pip_command +function get_pip_command() { + which pip || which pip-python + + if [ $? -ne 0 ]; then + die $LINENO "Unable to find pip; cannot continue" + fi +} + +# Get the path to the directory where python executables are installed. +# get_python_exec_prefix +function get_python_exec_prefix() { + if is_fedora || is_suse; then + echo "/usr/bin" + else + echo "/usr/local/bin" + fi +} + +# Wrapper for ``pip install`` to set cache and proxy environment variables +# Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``PIP_USE_MIRRORS``, +# ``TRACK_DEPENDS``, ``*_proxy`` +# pip_install package [package ...] +function pip_install { + [[ "$OFFLINE" = "True" || -z "$@" ]] && return + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + if [[ $TRACK_DEPENDS = True ]]; then + source $DEST/.venv/bin/activate + CMD_PIP=$DEST/.venv/bin/pip + SUDO_PIP="env" + else + SUDO_PIP="sudo" + CMD_PIP=$(get_pip_command) + fi + + # Mirror option not needed anymore because pypi has CDN available, + # but it's useful in certain circumstances + PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False} + if [[ "$PIP_USE_MIRRORS" != "False" ]]; then + PIP_MIRROR_OPT="--use-mirrors" + fi + + # pip < 1.4 has a bug where it will use an already existing build + # directory unconditionally. Say an earlier component installs + # foo v1.1; pip will have built foo's source in + # /tmp/$USER-pip-build.
Even if a later component specifies foo < + # 1.1, the existing extracted build will be used and cause + # confusing errors. By creating unique build directories we avoid + # this problem. See https://github.com/pypa/pip/issues/709 + local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX) + + $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \ + HTTP_PROXY=$http_proxy \ + HTTPS_PROXY=$https_proxy \ + NO_PROXY=$no_proxy \ + $CMD_PIP install --build=${pip_build_tmp} \ + $PIP_MIRROR_OPT $@ \ + && $SUDO_PIP rm -rf ${pip_build_tmp} +} + + +# Service Functions +# ================= + +# remove extra commas from the input string (i.e. ``ENABLED_SERVICES``) +# _cleanup_service_list service-list +function _cleanup_service_list () { + echo "$1" | sed -e ' + s/,,/,/g; + s/^,//; + s/,$// + ' +} + +# disable_all_services() removes all current services +# from ``ENABLED_SERVICES`` to reset the configuration +# before a minimal installation +# Uses global ``ENABLED_SERVICES`` +# disable_all_services +function disable_all_services() { + ENABLED_SERVICES="" +} + +# Remove all services starting with '-'. For example, to install all default +# services except rabbit (rabbit) set in ``localrc``: +# ENABLED_SERVICES+=",-rabbit" +# Uses global ``ENABLED_SERVICES`` +# disable_negated_services +function disable_negated_services() { + local tmpsvcs="${ENABLED_SERVICES}" + local service + for service in ${tmpsvcs//,/ }; do + if [[ ${service} == -* ]]; then + tmpsvcs=$(echo ${tmpsvcs}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g") + fi + done + ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") +} + +# disable_service() removes the services passed as argument to the +# ``ENABLED_SERVICES`` list, if they are present. +# +# For example: +# disable_service rabbit +# +# This function does not know about the special cases +# for nova, glance, and neutron built into is_service_enabled(). +# Uses global ``ENABLED_SERVICES`` +# disable_service service [service ...] 
+function disable_service() { + local tmpsvcs=",${ENABLED_SERVICES}," + local service + for service in $@; do + if is_service_enabled $service; then + tmpsvcs=${tmpsvcs//,$service,/,} + fi + done + ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") +} + +# enable_service() adds the services passed as argument to the +# ``ENABLED_SERVICES`` list, if they are not already present. +# +# For example: +# enable_service qpid +# +# This function does not know about the special cases +# for nova, glance, and neutron built into is_service_enabled(). +# Uses global ``ENABLED_SERVICES`` +# enable_service service [service ...] +function enable_service() { + local tmpsvcs="${ENABLED_SERVICES}" + for service in $@; do + if ! is_service_enabled $service; then + tmpsvcs+=",$service" + fi + done + ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") + disable_negated_services +} + +# is_service_enabled() checks if the service(s) specified as arguments are +# enabled by the user in ``ENABLED_SERVICES``. +# +# Multiple services specified as arguments are ``OR``'ed together; the test +# is a short-circuit boolean, i.e it returns on the first match. +# +# There are special cases for some 'catch-all' services:: +# **nova** returns true if any service enabled start with **n-** +# **cinder** returns true if any service enabled start with **c-** +# **ceilometer** returns true if any service enabled start with **ceilometer** +# **glance** returns true if any service enabled start with **g-** +# **neutron** returns true if any service enabled start with **q-** +# **swift** returns true if any service enabled start with **s-** +# **trove** returns true if any service enabled start with **tr-** +# For backward compatibility if we have **swift** in ENABLED_SERVICES all the +# **s-** services will be enabled. This will be deprecated in the future. +# +# Cells within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``. 
+# We also need to make sure to treat **n-cell-region** and **n-cell-child** +# as enabled in this case. +# +# Uses global ``ENABLED_SERVICES`` +# is_service_enabled service [service ...] +function is_service_enabled() { + services=$@ + for service in ${services}; do + [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 + + # Look for top-level 'enabled' function for this service + if type is_${service}_enabled >/dev/null 2>&1; then + # A function exists for this service, use it + is_${service}_enabled + return $? + fi + + # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled() + # are implemented + + [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0 + [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0 + [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0 + [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0 + [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 + [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && return 0 + [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 + [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0 + [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0 + [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0 + done + return 1 +} + +# Toggle enable/disable_service for services that must run exclusive of each other +# $1 The name of a variable containing a space-separated list of services +# $2 The name of a variable in which to store the enabled service's name +# $3 The name of the service to enable +function use_exclusive_service { + local options=${!1} + local selection=$3 + out=$2 + [ -z $selection ] || [[ ! 
"$options" =~ "$selection" ]] && return 1 + for opt in $options;do + [[ "$opt" = "$selection" ]] && enable_service $opt || disable_service $opt + done + eval "$out=$selection" + return 0 +} + + +# System Function +# =============== + +# Only run the command if the target file (the last arg) is not on an +# NFS filesystem. +function _safe_permission_operation() { + local args=( $@ ) + local last + local sudo_cmd + local dir_to_check + + let last="${#args[*]} - 1" + + dir_to_check=${args[$last]} + if [ ! -d "$dir_to_check" ]; then + dir_to_check=`dirname "$dir_to_check"` + fi + + if is_nfs_directory "$dir_to_check" ; then + return 0 + fi + + if [[ $TRACK_DEPENDS = True ]]; then + sudo_cmd="env" + else + sudo_cmd="sudo" + fi + + $sudo_cmd $@ +} + +# Exit 0 if address is in network or 1 if address is not in network +# ip-range is in CIDR notation: 1.2.3.4/20 +# address_in_net ip-address ip-range +function address_in_net() { + local ip=$1 + local range=$2 + local masklen=${range#*/} + local network=$(maskip ${range%/*} $(cidr2netmask $masklen)) + local subnet=$(maskip $ip $(cidr2netmask $masklen)) + [[ $network == $subnet ]] +} + +# Add a user to a group. +# add_user_to_group user group +function add_user_to_group() { + local user=$1 + local group=$2 + + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + # SLE11 and openSUSE 12.2 don't have the usual usermod + if ! 
is_suse || [[ "$os_VENDOR" = "openSUSE" && "$os_RELEASE" != "12.2" ]]; then + sudo usermod -a -G "$group" "$user" + else + sudo usermod -A "$group" "$user" + fi +} + +# Convert CIDR notation to a IPv4 netmask +# cidr2netmask cidr-bits +function cidr2netmask() { + local maskpat="255 255 255 255" + local maskdgt="254 252 248 240 224 192 128" + set -- ${maskpat:0:$(( ($1 / 8) * 4 ))}${maskdgt:$(( (7 - ($1 % 8)) * 4 )):3} + echo ${1-0}.${2-0}.${3-0}.${4-0} +} + +# Gracefully cp only if source file/dir exists +# cp_it source destination +function cp_it { + if [ -e $1 ] || [ -d $1 ]; then + cp -pRL $1 $2 + fi +} + +# HTTP and HTTPS proxy servers are supported via the usual environment variables [1] +# ``http_proxy``, ``https_proxy`` and ``no_proxy``. They can be set in +# ``localrc`` or on the command line if necessary:: +# +# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html +# +# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh + +function export_proxy_variables() { + if [[ -n "$http_proxy" ]]; then + export http_proxy=$http_proxy + fi + if [[ -n "$https_proxy" ]]; then + export https_proxy=$https_proxy + fi + if [[ -n "$no_proxy" ]]; then + export no_proxy=$no_proxy + fi +} + +# Returns true if the directory is on a filesystem mounted via NFS. 
+function is_nfs_directory() { + local mount_type=`stat -f -L -c %T $1` + test "$mount_type" == "nfs" +} + +# Return the network portion of the given IP address using netmask +# netmask is in the traditional dotted-quad format +# maskip ip-address netmask +function maskip() { + local ip=$1 + local mask=$2 + local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}" + local subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.})) + echo $subnet +} + +# Service wrapper to restart services +# restart_service service-name +function restart_service() { + if is_ubuntu; then + sudo /usr/sbin/service $1 restart + else + sudo /sbin/service $1 restart + fi +} + +# Only change permissions of a file or directory if it is not on an +# NFS filesystem. +function safe_chmod() { + _safe_permission_operation chmod $@ +} + +# Only change ownership of a file or directory if it is not on an NFS +# filesystem. +function safe_chown() { + _safe_permission_operation chown $@ +} + +# Service wrapper to start services +# start_service service-name +function start_service() { + if is_ubuntu; then + sudo /usr/sbin/service $1 start + else + sudo /sbin/service $1 start + fi +} + +# Service wrapper to stop services +# stop_service service-name +function stop_service() { + if is_ubuntu; then + sudo /usr/sbin/service $1 stop + else + sudo /sbin/service $1 stop + fi +} + + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: From abc7b1d765665b66a027fe93c841b62e537c7843 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 12 Feb 2014 12:09:22 -0600 Subject: [PATCH 0137/4119] Backport Grenade updates Backport changes made in Grenade's copy of functions since the last sync: * d0654b9,i 4c7726e - get_release_name_from_branch() * 7907766 - edits to install_package() Change-Id: I0714c0b1072f1360c3c08fe24225e65e2a550fad --- functions-common | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 
deletion(-) diff --git a/functions-common b/functions-common index 0cecb0b9fb..d92e39cd91 100644 --- a/functions-common +++ b/functions-common @@ -460,6 +460,17 @@ function is_ubuntu { # Git Functions # ============= +# Returns openstack release name for a given branch name +# ``get_release_name_from_branch branch-name`` +function get_release_name_from_branch(){ + local branch=$1 + if [[ $branch =~ "stable/" ]]; then + echo ${branch#*/} + else + echo "master" + fi +} + # git clone only if directory doesn't exist already. Since ``DEST`` might not # be owned by the installation user, we create the directory and change the # ownership to the proper user. @@ -792,7 +803,9 @@ function get_packages() { # install_package package [package ...] function install_package() { if is_ubuntu; then - [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update + # if there are transient errors pulling the updates, that's fine. It may + # be secondary repositories that we don't really care about. + [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update || /bin/true NO_UPDATE_REPOS=True apt_get install "$@" From 3f918a4541a49cc0d50d2931f8670e6e0074280e Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Tue, 11 Feb 2014 11:47:47 -0700 Subject: [PATCH 0138/4119] Set DEFAULT_IMAGE_NAME for Docker hypervisor This allows the tempest configuration to set the right image for booting docker containers with Nova. Since glance uploads are not yet integrated in devstack, IMAGE_URLS remains empty. 
Change-Id: I5df153cd1d5e1411bb3c11816122ce280148e129 --- lib/nova_plugins/hypervisor-docker | 2 +- stackrc | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index cdd9317761..b5df19db02 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -32,7 +32,7 @@ DOCKER_PID_FILE=/var/run/docker.pid DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042} DOCKER_IMAGE=${DOCKER_IMAGE:-cirros:latest} -DOCKER_IMAGE_NAME=cirros +DOCKER_IMAGE_NAME=$DEFAULT_IMAGE_NAME DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-registry:latest} DOCKER_REGISTRY_IMAGE_NAME=registry DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME} diff --git a/stackrc b/stackrc index 7eed60cb2c..d754f3b074 100644 --- a/stackrc +++ b/stackrc @@ -280,6 +280,9 @@ case "$VIRT_DRIVER" in openvz) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64} IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz"};; + docker) + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros} + IMAGE_URLS=${IMAGE_URLS:-};; libvirt) case "$LIBVIRT_TYPE" in lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc From 1d50d78560910779d28db85591fbb67e1617ff34 Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Wed, 12 Feb 2014 18:23:36 -0500 Subject: [PATCH 0139/4119] Fix service name for marconi This patch updates TEMPEST_SERVICES, to have the same name as devstack marconi service. 
Change-Id: Ibc9b4a66fccd3d95ddd1717bf549476bd843204a Implements: blueprint add-basic-marconi-tests --- lib/marconi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index d1ab5f3a5c..0aaff1bd58 100644 --- a/lib/marconi +++ b/lib/marconi @@ -52,7 +52,7 @@ MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconicli MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master} # Tell Tempest this project is present -TEMPEST_SERVICES+=,marconi +TEMPEST_SERVICES+=,marconi-server # Functions From a42541a9fb00e21b278a06d4034528976cbf8336 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 13 Feb 2014 09:39:15 -0500 Subject: [PATCH 0140/4119] add heat to the default devstack service list heat has been integrated for a while, we should turn it on out of the box. Also refactor the service list to make it simpler to understand what's enabled. Change-Id: I9738f39ce196d5c7f75b0a5b164222ea165fb340 --- stackrc | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 9166a171e1..165196c545 100644 --- a/stackrc +++ b/stackrc @@ -35,7 +35,18 @@ fi # enable_service neutron # # Optional, to enable tempest configuration as part of devstack # enable_service tempest -ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,mysql + +# core compute (glance / keystone / nova (+ nova-network)) +ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,n-sch,n-novnc,n-xvnc,n-cauth +# cinder +ENABLED_SERVICES+=,c-sch,c-api,c-vol +# heat +ENABLED_SERVICES+=,h-eng,h-api,h-api-cfn,h-api-cw +# dashboard +ENABLED_SERVICES+=,horizon +# additional services +ENABLED_SERVICES+=,rabbit,tempest,mysql + # Tell Tempest which services are available. The default is set here as # Tempest falls late in the configuration sequence. 
This differs from From 1bcd2800271d6a72237084ad7f36f84072eecd18 Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Thu, 13 Feb 2014 15:14:41 +0000 Subject: [PATCH 0141/4119] Don't warn about heat modifying flavors Since de0898a Heat no longer modifies flavors, so the comment and output related to modified flavors is no longer needed. Change-Id: I1007d2ab3387f28b8d7487f450cab4592f2824aa --- stack.sh | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/stack.sh b/stack.sh index e45707b781..1dc4b74ab3 100755 --- a/stack.sh +++ b/stack.sh @@ -1181,7 +1181,7 @@ fi # Configure and launch heat engine, api and metadata if is_service_enabled heat; then - # Initialize heat, including replacing nova flavors + # Initialize heat echo_summary "Configuring Heat" init_heat echo_summary "Starting Heat" @@ -1350,11 +1350,6 @@ if is_service_enabled horizon; then echo "Horizon is now available at http://$SERVICE_HOST/" fi -# Warn that the default flavors have been changed by Heat -if is_service_enabled heat; then - echo "Heat has replaced the default flavors. View by running: nova flavor-list" -fi - # If Keystone is present you can point ``nova`` cli to this server if is_service_enabled key; then echo "Keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/" From 4074e298112ba179ba743982c6904c8bd70030b2 Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Fri, 14 Feb 2014 00:54:58 +0900 Subject: [PATCH 0142/4119] Use lowercase section name in Neutron ML2 security group config All other security group configs in Neutron are lower-case and it should be consistent. 
Change-Id: I683333c1e186446a69172446cca6d9b952673ed4 Closes-Bug: #1279862 --- lib/neutron_plugins/ml2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index ab4e3474a6..4ceabe765d 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -93,9 +93,9 @@ function neutron_plugin_configure_service() { # instead use its own config variable to indicate whether security # groups is enabled, and that will need to be set here instead. if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver neutron.agent.not.a.real.FirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.not.a.real.FirewallDriver else - iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver neutron.agent.firewall.NoopFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver fi # Since we enable the tunnel TypeDrivers, also enable a local_ip From 22dece0d969b476cf187fe7359fa38d96189cdc1 Mon Sep 17 00:00:00 2001 From: John Eckersberg Date: Thu, 13 Feb 2014 16:21:24 -0500 Subject: [PATCH 0143/4119] Add Fedora support to install_docker.sh On Fedora, just install the docker-io package as supplied in the Fedora repository. 
Change-Id: Iea74878d3e1c434863c188ea2253817384e56bf4 --- tools/docker/install_docker.sh | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh index b9e1b242dd..27c8c8210b 100755 --- a/tools/docker/install_docker.sh +++ b/tools/docker/install_docker.sh @@ -30,15 +30,19 @@ SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} # Install Docker Service # ====================== -# Stop the auto-repo updates and do it when required here -NO_UPDATE_REPOS=True - -# Set up home repo -curl https://get.docker.io/gpg | sudo apt-key add - -install_package python-software-properties && \ - sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list" -apt_get update -install_package --force-yes lxc-docker socat +if is_fedora; then + install_package docker-io socat +else + # Stop the auto-repo updates and do it when required here + NO_UPDATE_REPOS=True + + # Set up home repo + curl https://get.docker.io/gpg | sudo apt-key add - + install_package python-software-properties && \ + sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list" + apt_get update + install_package --force-yes lxc-docker socat +fi # Start the daemon - restart just in case the package ever auto-starts... restart_service docker From d6997d317685353482a0aa7a18408c1313583460 Mon Sep 17 00:00:00 2001 From: John Griffith Date: Thu, 13 Feb 2014 22:56:29 +0000 Subject: [PATCH 0144/4119] Fix tee statement for catching tempest output The use of exec and tee doesn't seem to be quite right, and was unreliable in terms of catching the exit status of the tempest test as well as not catching the output when things went wrong. This changes the way we do the redirect and the tee to something that should be more robust and seems to work reliably in testing. 
Change-Id: Ieb9d725839fb8e3f9e2e63a2b7b2e9c7c86713a2 --- driver_certs/cinder_driver_cert.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh index edcc6d4800..8380deea42 100755 --- a/driver_certs/cinder_driver_cert.sh +++ b/driver_certs/cinder_driver_cert.sh @@ -89,9 +89,8 @@ start_cinder sleep 5 # run tempest api/volume/test_* -log_message "Run the actual tempest volume tests (./tools/pretty_tox.sh api.volume_*)...", True -exec 2> >(tee -a $TEMPFILE) -`./tools/pretty_tox.sh api.volume` +log_message "Run the actual tempest volume tests (./tools/pretty_tox.sh api.volume)...", True +./tools/pretty_tox.sh api.volume 2>&1 | tee -a $TEMPFILE if [[ $? = 0 ]]; then log_message "CONGRATULATIONS!!! Device driver PASSED!", True log_message "Submit output: ($TEMPFILE)" From 0b3aacc707ab8b3593285e02dc172b3c96730efc Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Thu, 13 Feb 2014 18:18:51 -0500 Subject: [PATCH 0145/4119] Fix MARCONI_USER This patch fixes the MARCONI_USER in create_marconi_accounts(). 
Change-Id: I9618530fa20ee84d25646107c7450017ada908df --- lib/marconi | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index 0aaff1bd58..e400419d57 100644 --- a/lib/marconi +++ b/lib/marconi @@ -154,10 +154,12 @@ function create_marconi_accounts() { MARCONI_USER=$(get_id keystone user-create --name=marconi \ --pass="$SERVICE_PASSWORD" \ --tenant-id $SERVICE_TENANT \ - --email=marconi@example.com) + --email=marconi@example.com \ + | grep " id " | get_field 2) keystone user-role-add --tenant-id $SERVICE_TENANT \ --user-id $MARCONI_USER \ --role-id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then MARCONI_SERVICE=$(keystone service-create \ --name=marconi \ From 16d3ad057dc0b982c801fcfa9d5497c1daeb34cd Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Thu, 13 Feb 2014 18:59:50 -0600 Subject: [PATCH 0146/4119] Use database connection for keystone The keystone configuration used the 'connection' option in the 'sql' section of the keystone.conf file. This option is deprecated in favor of 'connection' in the 'database' section. The keystone setup code is changed to use the option in the new section rather than the deprecated one. 
Change-Id: I62fd2f50ded3b8848e9e5225e88c80ed8fed3bff --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 4f7f68b57f..5e2e88d33f 100644 --- a/lib/keystone +++ b/lib/keystone @@ -201,7 +201,7 @@ function configure_keystone() { iniset $KEYSTONE_CONF token provider keystone.token.providers.uuid.Provider fi - iniset $KEYSTONE_CONF sql connection `database_connection_url keystone` + iniset $KEYSTONE_CONF database connection `database_connection_url keystone` iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2" if [[ "$KEYSTONE_TOKEN_BACKEND" = "sql" ]]; then From 41d96d7b4837c6bafc2622954a3c6c1fdcc13a82 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 11 Feb 2014 09:08:35 -0600 Subject: [PATCH 0147/4119] Marconi fixes for precise Fix a couple of problems found on Ubuntu: * $MARCONI_DIR/etc/marconi/policy.json doesn't exist; removed the copy to /etc/marconi * added a seting of nssize in /etc/mongodb.conf for Ubuntu * restart the correct serice name on Ubuntu Change-Id: I9bd2ab1aa4fb94ff96559e069e5b62138c358fb5 --- lib/marconi | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/lib/marconi b/lib/marconi index 1eaebbdf16..3d5ef82471 100644 --- a/lib/marconi +++ b/lib/marconi @@ -82,10 +82,6 @@ function configure_marconi() { iniset $MARCONI_CONF DEFAULT verbose True iniset $MARCONI_CONF 'drivers:transport:wsgi' bind '0.0.0.0' - # Install the policy file for the API server - cp $MARCONI_DIR/etc/marconi/policy.json $MARCONI_CONF_DIR - iniset $MARCONI_CONF DEFAULT policy_file $MARCONI_CONF_DIR/policy.json - iniset $MARCONI_CONF keystone_authtoken auth_protocol http iniset $MARCONI_CONF keystone_authtoken admin_user marconi iniset $MARCONI_CONF keystone_authtoken admin_password $SERVICE_PASSWORD @@ -102,9 +98,16 @@ function configure_marconi() { function configure_mongodb() { # Set nssize to 2GB. 
This increases the number of namespaces supported # # per database. - sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod - - restart_service mongod + if is_ubuntu; then + sudo sed -i -e " + s|[^ \t]*#[ \t]*\(nssize[ \t]*=.*\$\)|\1| + s|^\(nssize[ \t]*=[ \t]*\).*\$|\1 2047| + " /etc/mongodb.conf + restart_service mongodb + elif is_fedora; then + sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod + restart_service mongod + fi } # init_marconi() - Initialize etc. From 1e4e3acaadc1397a7d69a83e8fe9a54dd879983a Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Fri, 14 Feb 2014 11:29:26 -0500 Subject: [PATCH 0148/4119] Revert the tempest service name to marconi This patch is to rollback the change introduced by https://review.openstack.org/#/c/73100/. 73100 is no longer needed because of the recent https://review.openstack.org/#/c/69497/. Using 'marconi' as the service name will keep us aligned with the naming convention used by other projects. Change-Id: I5da6d2aaeb5c9dc29a1cbc70c8425449807eb34c --- lib/marconi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index 0aaff1bd58..d1ab5f3a5c 100644 --- a/lib/marconi +++ b/lib/marconi @@ -52,7 +52,7 @@ MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconicli MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master} # Tell Tempest this project is present -TEMPEST_SERVICES+=,marconi-server +TEMPEST_SERVICES+=,marconi # Functions From 5705db691386809e288758a0314dfa60d9b36da7 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Sat, 1 Feb 2014 20:06:42 -0500 Subject: [PATCH 0149/4119] Optionally enable file injection There is a patch up for nova right now that disables file injection by default. This is a corresponding devstack change that only sets file injection options if it is enabled in the devstack config. This is good to keep around so that we can easily turn it on for testing. 
The nova change is id Icff1304fc816acc843f8962727aef8bbbc7bbaa3. Change-Id: I5015f2c351b1d680c205d7f9a5204febca490b91 --- lib/nova | 6 ------ lib/nova_plugins/hypervisor-libvirt | 16 ++++++++++++++++ 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/lib/nova b/lib/nova index eaaaa6210c..d90aea7108 100644 --- a/lib/nova +++ b/lib/nova @@ -513,12 +513,6 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" iniset_rpc_backend nova $NOVA_CONF DEFAULT iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" - - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - # File injection is being disabled by default in the near future - - # disable it here for now to avoid surprises later. - iniset $NOVA_CONF libvirt inject_partition '-2' - fi } function init_nova_cells() { diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 42d3af15cf..415244ffae 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -23,6 +23,9 @@ set +o xtrace # Defaults # -------- +# File injection is disabled by default in Nova. This will turn it back on. +ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False} + # Entry Points # ------------ @@ -116,6 +119,19 @@ EOF" if is_arch "ppc64"; then iniset $NOVA_CONF DEFAULT vnc_enabled "false" fi + + ENABLE_FILE_INJECTION=$(trueorfalse False $ENABLE_FILE_INJECTION) + if [[ "$ENABLE_FILE_INJECTION" = "True" ]] ; then + # When libguestfs is available for file injection, enable using + # libguestfs to inspect the image and figure out the proper + # partition to inject into. + iniset $NOVA_CONF libvirt inject_partition '-1' + iniset $NOVA_CONF libvirt inject_key 'true' + else + # File injection is being disabled by default in the near future - + # disable it here for now to avoid surprises later. 
+ iniset $NOVA_CONF libvirt inject_partition '-2' + fi } # install_nova_hypervisor() - Install external components From 19685428e3d3e51ff88aa5254f7c27d476053798 Mon Sep 17 00:00:00 2001 From: Steve Martinelli Date: Fri, 24 Jan 2014 13:02:26 -0600 Subject: [PATCH 0150/4119] Change most keystoneclient commands to openstacklient in libs migrated most keystoneclient commands from the following libs: ceilometer cinder ironic keystone marconi neutron nova savanna swift trove Also need to set and unset openstackclient specific environment variables from stack.sh Change-Id: I725f30bc08e1df5a4c5770576c19ad1ddaeb843a --- lib/ceilometer | 36 +++++++++++++-------------- lib/cinder | 39 ++++++++++++++--------------- lib/ironic | 30 +++++++++++----------- lib/keystone | 67 ++++++++++++++++++++++++++++---------------------- lib/marconi | 32 +++++++++++++----------- lib/neutron | 32 ++++++++++++------------ lib/nova | 38 ++++++++++++++-------------- lib/savanna | 32 ++++++++++++------------ lib/swift | 50 +++++++++++++++++++++++-------------- lib/trove | 29 +++++++++++----------- stack.sh | 4 +++ 11 files changed, 209 insertions(+), 180 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 4ca77bb72b..6c87d03b13 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -71,33 +71,33 @@ function is_ceilometer_enabled { create_ceilometer_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") # Ceilometer if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then - CEILOMETER_USER=$(keystone user-create \ - --name=ceilometer \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=ceilometer@example.com \ + CEILOMETER_USER=$(openstack user create \ + ceilometer \ + --password "$SERVICE_PASSWORD" \ + 
--project $SERVICE_TENANT \ + --email ceilometer@example.com \ | grep " id " | get_field 2) - keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $CEILOMETER_USER \ - --role-id $ADMIN_ROLE + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $CEILOMETER_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - CEILOMETER_SERVICE=$(keystone service-create \ - --name=ceilometer \ + CEILOMETER_SERVICE=$(openstack service create \ + ceilometer \ --type=metering \ --description="OpenStack Telemetry Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $CEILOMETER_SERVICE \ --region RegionOne \ - --service_id $CEILOMETER_SERVICE \ - --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \ - --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \ - --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" + --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ + --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ + --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" fi fi } diff --git a/lib/cinder b/lib/cinder index d5e78bb39c..c8c90c098d 100644 --- a/lib/cinder +++ b/lib/cinder @@ -330,45 +330,44 @@ function configure_cinder() { # Migrated from keystone_data.sh create_cinder_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") # Cinder if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - CINDER_USER=$(keystone user-create \ - --name=cinder \ - --pass="$SERVICE_PASSWORD" \ 
- --tenant-id $SERVICE_TENANT \ - --email=cinder@example.com \ + CINDER_USER=$(openstack user create \ + cinder \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email cinder@example.com \ | grep " id " | get_field 2) - keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $CINDER_USER \ - --role-id $ADMIN_ROLE + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $CINDER_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - CINDER_SERVICE=$(keystone service-create \ - --name=cinder \ + CINDER_SERVICE=$(openstack service create \ + cinder \ --type=volume \ --description="Cinder Volume Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $CINDER_SERVICE \ --region RegionOne \ - --service_id $CINDER_SERVICE \ --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" - CINDER_V2_SERVICE=$(keystone service-create \ - --name=cinderv2 \ + CINDER_V2_SERVICE=$(openstack service create \ + cinderv2 \ --type=volumev2 \ --description="Cinder Volume Service V2" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $CINDER_V2_SERVICE \ --region RegionOne \ - --service_id $CINDER_V2_SERVICE \ --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \ --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \ --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" - fi fi } diff --git a/lib/ironic b/lib/ironic index 3c0e3cbaf7..607b13125a 100644 --- a/lib/ironic +++ b/lib/ironic @@ -145,30 +145,30 @@ function create_ironic_cache_dir() { # service ironic 
admin # if enabled create_ironic_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") # Ironic if [[ "$ENABLED_SERVICES" =~ "ir-api" ]]; then - IRONIC_USER=$(keystone user-create \ - --name=ironic \ - --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT \ - --email=ironic@example.com \ + IRONIC_USER=$(openstack user create \ + ironic \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email ironic@example.com \ | grep " id " | get_field 2) - keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user_id $IRONIC_USER \ - --role_id $ADMIN_ROLE + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $IRONIC_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - IRONIC_SERVICE=$(keystone service-create \ - --name=ironic \ + IRONIC_SERVICE=$(openstack service create \ + ironic \ --type=baremetal \ --description="Ironic baremetal provisioning service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $IRONIC_SERVICE \ --region RegionOne \ - --service_id $IRONIC_SERVICE \ --publicurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \ --adminurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \ --internalurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" diff --git a/lib/keystone b/lib/keystone index 4f7f68b57f..bf0dcbb1bb 100644 --- a/lib/keystone +++ b/lib/keystone @@ -275,60 +275,69 @@ function configure_keystone() { create_keystone_accounts() { # admin - ADMIN_TENANT=$(keystone tenant-create \ - --name admin \ + ADMIN_TENANT=$(openstack project create \ + admin \ | grep " id " | get_field 2) - ADMIN_USER=$(keystone user-create \ - --name admin \ - --pass "$ADMIN_PASSWORD" \ + ADMIN_USER=$(openstack user create 
\ + admin \ + --project "$ADMIN_TENANT" \ --email admin@example.com \ + --password "$ADMIN_PASSWORD" \ | grep " id " | get_field 2) - ADMIN_ROLE=$(keystone role-create \ - --name admin \ + ADMIN_ROLE=$(openstack role create \ + admin \ | grep " id " | get_field 2) - keystone user-role-add \ - --user-id $ADMIN_USER \ - --role-id $ADMIN_ROLE \ - --tenant-id $ADMIN_TENANT + openstack role add \ + $ADMIN_ROLE \ + --project $ADMIN_TENANT \ + --user $ADMIN_USER # service - SERVICE_TENANT=$(keystone tenant-create \ - --name $SERVICE_TENANT_NAME \ + SERVICE_TENANT=$(openstack project create \ + $SERVICE_TENANT_NAME \ | grep " id " | get_field 2) # The Member role is used by Horizon and Swift so we need to keep it: - MEMBER_ROLE=$(keystone role-create --name=Member | grep " id " | get_field 2) + MEMBER_ROLE=$(openstack role create \ + Member \ + | grep " id " | get_field 2) # ANOTHER_ROLE demonstrates that an arbitrary role may be created and used # TODO(sleepsonthefloor): show how this can be used for rbac in the future! 
- ANOTHER_ROLE=$(keystone role-create --name=anotherrole | grep " id " | get_field 2) + ANOTHER_ROLE=$(openstack role create \ + anotherrole \ + | grep " id " | get_field 2) # invisible tenant - admin can't see this one - INVIS_TENANT=$(keystone tenant-create --name=invisible_to_admin | grep " id " | get_field 2) + INVIS_TENANT=$(openstack project create \ + invisible_to_admin \ + | grep " id " | get_field 2) # demo - DEMO_TENANT=$(keystone tenant-create \ - --name=demo \ + DEMO_TENANT=$(openstack project create \ + demo \ | grep " id " | get_field 2) - DEMO_USER=$(keystone user-create \ - --name demo \ - --pass "$ADMIN_PASSWORD" \ + DEMO_USER=$(openstack user create \ + demo \ + --project $DEMO_TENANT \ --email demo@example.com \ + --password "$ADMIN_PASSWORD" \ | grep " id " | get_field 2) - keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $DEMO_TENANT - keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $DEMO_TENANT - keystone user-role-add --user-id $DEMO_USER --role-id $ANOTHER_ROLE --tenant-id $DEMO_TENANT - keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $INVIS_TENANT + + openstack role add --project $DEMO_TENANT --user $DEMO_USER $MEMBER_ROLE + openstack role add --project $DEMO_TENANT --user $ADMIN_USER $ADMIN_ROLE + openstack role add --project $DEMO_TENANT --user $DEMO_USER $ANOTHER_ROLE + openstack role add --project $INVIS_TENANT --user $DEMO_USER $MEMBER_ROLE # Keystone if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - KEYSTONE_SERVICE=$(keystone service-create \ - --name keystone \ + KEYSTONE_SERVICE=$(openstack service create \ + keystone \ --type identity \ --description "Keystone Identity Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $KEYSTONE_SERVICE \ --region RegionOne \ - --service_id $KEYSTONE_SERVICE \ --publicurl 
"$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION" \ --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v$IDENTITY_API_VERSION" \ --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION" diff --git a/lib/marconi b/lib/marconi index 7c8fd14255..88312cb1bd 100644 --- a/lib/marconi +++ b/lib/marconi @@ -151,27 +151,29 @@ function stop_marconi() { } function create_marconi_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") - - MARCONI_USER=$(get_id keystone user-create --name=marconi \ - --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT \ - --email=marconi@example.com \ - | grep " id " | get_field 2) - keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $MARCONI_USER \ - --role-id $ADMIN_ROLE + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") + + MARCONI_USER=$(openstack user create \ + marconi \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email marconi@example.com \ + | grep " id " | get_field 2) + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $MARCONI_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - MARCONI_SERVICE=$(keystone service-create \ - --name=marconi \ + MARCONI_SERVICE=$(openstack service create \ + marconi \ --type=queuing \ --description="Marconi Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $MARCONI_SERVICE \ --region RegionOne \ - --service_id $MARCONI_SERVICE \ --publicurl "http://$SERVICE_HOST:8888" \ --adminurl "http://$SERVICE_HOST:8888" \ --internalurl "http://$SERVICE_HOST:8888" diff --git a/lib/neutron b/lib/neutron index 5bd38bcf73..df276c71d5 100644 --- 
a/lib/neutron +++ b/lib/neutron @@ -332,29 +332,29 @@ function create_neutron_cache_dir() { # Migrated from keystone_data.sh function create_neutron_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - NEUTRON_USER=$(keystone user-create \ - --name=neutron \ - --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT \ - --email=neutron@example.com \ + NEUTRON_USER=$(openstack user create \ + neutron \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email neutron@example.com \ | grep " id " | get_field 2) - keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $NEUTRON_USER \ - --role-id $ADMIN_ROLE + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $NEUTRON_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - NEUTRON_SERVICE=$(keystone service-create \ - --name=neutron \ + NEUTRON_SERVICE=$(openstack service create \ + neutron \ --type=network \ --description="Neutron Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $NEUTRON_SERVICE \ --region RegionOne \ - --service_id $NEUTRON_SERVICE \ --publicurl "http://$SERVICE_HOST:9696/" \ --adminurl "http://$SERVICE_HOST:9696/" \ --internalurl "http://$SERVICE_HOST:9696/" @@ -363,7 +363,7 @@ function create_neutron_accounts() { } function create_neutron_initial_network() { - TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1) + TENANT_ID=$(openstack project list | grep " demo " | get_field 1) die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for demo" # Create a small network diff --git a/lib/nova b/lib/nova index d90aea7108..fefeda1236 100644 --- a/lib/nova +++ 
b/lib/nova @@ -324,41 +324,41 @@ function configure_nova() { # Migrated from keystone_data.sh create_nova_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") # Nova if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - NOVA_USER=$(keystone user-create \ - --name=nova \ - --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT \ - --email=nova@example.com \ + NOVA_USER=$(openstack user create \ + nova \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email nova@example.com \ | grep " id " | get_field 2) - keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $NOVA_USER \ - --role-id $ADMIN_ROLE + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $NOVA_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - NOVA_SERVICE=$(keystone service-create \ - --name=nova \ + NOVA_SERVICE=$(openstack service create \ + nova \ --type=compute \ --description="Nova Compute Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $NOVA_SERVICE \ --region RegionOne \ - --service_id $NOVA_SERVICE \ --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" - NOVA_V3_SERVICE=$(keystone service-create \ - --name=novav3 \ + NOVA_V3_SERVICE=$(openstack service create \ + novav3 \ --type=computev3 \ --description="Nova Compute Service V3" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $NOVA_V3_SERVICE \ --region RegionOne \ - --service_id 
$NOVA_V3_SERVICE \ --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \ --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \ --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" diff --git a/lib/savanna b/lib/savanna index 6f42311971..43c5e386fe 100644 --- a/lib/savanna +++ b/lib/savanna @@ -54,29 +54,29 @@ TEMPEST_SERVICES+=,savanna # service savanna admin function create_savanna_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") - - SAVANNA_USER=$(keystone user-create \ - --name=savanna \ - --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT \ - --email=savanna@example.com \ + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") + + SAVANNA_USER=$(openstack user create \ + savanna \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email savanna@example.com \ | grep " id " | get_field 2) - keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $SAVANNA_USER \ - --role-id $ADMIN_ROLE + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $SAVANNA_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - SAVANNA_SERVICE=$(keystone service-create \ - --name=savanna \ + SAVANNA_SERVICE=$(openstack service create \ + savanna \ --type=data_processing \ --description="Savanna Data Processing" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $SAVANNA_SERVICE \ --region RegionOne \ - --service_id $SAVANNA_SERVICE \ --publicurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ --adminurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ --internalurl 
"$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" diff --git a/lib/swift b/lib/swift index be25c81468..df586abe8b 100644 --- a/lib/swift +++ b/lib/swift @@ -527,39 +527,53 @@ function create_swift_accounts() { KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql} - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") - - SWIFT_USER=$(keystone user-create --name=swift --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT --email=swift@example.com | grep " id " | get_field 2) - keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $SWIFT_USER --role-id $ADMIN_ROLE + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") + + SWIFT_USER=$(openstack user create \ + swift \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email=swift@example.com \ + | grep " id " | get_field 2) + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $SWIFT_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - SWIFT_SERVICE=$(keystone service-create --name=swift --type="object-store" \ - --description="Swift Service" | grep " id " | get_field 2) - keystone endpoint-create \ + SWIFT_SERVICE=$(openstack service create \ + swift \ + --type="object-store" \ + --description="Swift Service" \ + | grep " id " | get_field 2) + openstack endpoint create \ + $SWIFT_SERVICE \ --region RegionOne \ - --service_id $SWIFT_SERVICE \ --publicurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \ --adminurl "http://$SERVICE_HOST:8080" \ --internalurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" fi - SWIFT_TENANT_TEST1=$(keystone tenant-create --name=swifttenanttest1 | grep " id " | get_field 2) + SWIFT_TENANT_TEST1=$(openstack project create swifttenanttest1 | grep " id " | get_field 2) 
die_if_not_set $LINENO SWIFT_TENANT_TEST1 "Failure creating SWIFT_TENANT_TEST1" - SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=$SWIFTUSERTEST1_PASSWORD --email=test@example.com | grep " id " | get_field 2) + SWIFT_USER_TEST1=$(openstack user create swiftusertest1 --password=$SWIFTUSERTEST1_PASSWORD \ + --project "$SWIFT_TENANT_TEST1" --email=test@example.com | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1" - keystone user-role-add --user-id $SWIFT_USER_TEST1 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST1 + openstack role add --user $SWIFT_USER_TEST1 --project $SWIFT_TENANT_TEST1 $ADMIN_ROLE - SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 --pass=$SWIFTUSERTEST3_PASSWORD --email=test3@example.com | grep " id " | get_field 2) + SWIFT_USER_TEST3=$(openstack user create swiftusertest3 --password=$SWIFTUSERTEST3_PASSWORD \ + --project "$SWIFT_TENANT_TEST1" --email=test3@example.com | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_USER_TEST3 "Failure creating SWIFT_USER_TEST3" - keystone user-role-add --user-id $SWIFT_USER_TEST3 --role-id $ANOTHER_ROLE --tenant-id $SWIFT_TENANT_TEST1 + openstack role add --user $SWIFT_USER_TEST3 --project $SWIFT_TENANT_TEST1 $ANOTHER_ROLE - SWIFT_TENANT_TEST2=$(keystone tenant-create --name=swifttenanttest2 | grep " id " | get_field 2) + SWIFT_TENANT_TEST2=$(openstack project create swifttenanttest2 | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_TENANT_TEST2 "Failure creating SWIFT_TENANT_TEST2" - SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=$SWIFTUSERTEST2_PASSWORD --email=test2@example.com | grep " id " | get_field 2) + + SWIFT_USER_TEST2=$(openstack user create swiftusertest2 --password=$SWIFTUSERTEST2_PASSWORD \ + --project "$SWIFT_TENANT_TEST2" --email=test2@example.com | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_USER_TEST2 "Failure creating SWIFT_USER_TEST2" - keystone 
user-role-add --user-id $SWIFT_USER_TEST2 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST2 + openstack role add --user $SWIFT_USER_TEST2 --project $SWIFT_TENANT_TEST2 $ADMIN_ROLE } # init_swift() - Initialize rings diff --git a/lib/trove b/lib/trove index bb4549121d..5e1bbd548d 100644 --- a/lib/trove +++ b/lib/trove @@ -71,28 +71,29 @@ function setup_trove_logging() { create_trove_accounts() { # Trove - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - SERVICE_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + SERVICE_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") if [[ "$ENABLED_SERVICES" =~ "trove" ]]; then - TROVE_USER=$(keystone user-create \ - --name=trove \ - --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT \ - --email=trove@example.com \ + TROVE_USER=$(openstack user create \ + trove \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email trove@example.com \ | grep " id " | get_field 2) - keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $TROVE_USER \ - --role-id $SERVICE_ROLE + openstack role add \ + $SERVICE_ROLE \ + --project $SERVICE_TENANT \ + --user $TROVE_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - TROVE_SERVICE=$(keystone service-create \ - --name=trove \ + TROVE_SERVICE=$(openstack service create + trove \ --type=database \ --description="Trove Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $TROVE_SERVICE \ --region RegionOne \ - --service_id $TROVE_SERVICE \ --publicurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \ --adminurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \ --internalurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" diff --git a/stack.sh b/stack.sh index c153132485..e5d87cca11 100755 --- a/stack.sh +++ b/stack.sh @@ -925,6 +925,9 @@ if is_service_enabled key; 
then # Do the keystone-specific bits from keystone_data.sh export OS_SERVICE_TOKEN=$SERVICE_TOKEN export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT + # Add temporarily to make openstackclient work + export OS_TOKEN=$SERVICE_TOKEN + export OS_URL=$SERVICE_ENDPOINT create_keystone_accounts create_nova_accounts create_cinder_accounts @@ -947,6 +950,7 @@ if is_service_enabled key; then bash -x $FILES/keystone_data.sh # Set up auth creds now that keystone is bootstrapped + unset OS_TOKEN OS_URL export OS_AUTH_URL=$SERVICE_ENDPOINT export OS_TENANT_NAME=admin export OS_USERNAME=admin From 33d1f86a4931de76fba555a9a3f5e5fa3fd7c171 Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Thu, 13 Feb 2014 15:00:33 +0000 Subject: [PATCH 0151/4119] Add support for creating heat stack domain The Heat instance-users blueprint requires an additional domain where heat creates projects and users related to stack resources so add support for creating this domain when configured to install Heat. Note a workaround is currently required to make the openstack command work with the v3 keystone API. 
Change-Id: I36157372d85b577952b55481ca5cc42146011a54 --- lib/heat | 20 ++++++++++++++++++++ stack.sh | 4 ++++ 2 files changed, 24 insertions(+) diff --git a/lib/heat b/lib/heat index 9f5dd8b588..efb01ef3b8 100644 --- a/lib/heat +++ b/lib/heat @@ -110,6 +110,15 @@ function configure_heat() { iniset $HEAT_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 iniset $HEAT_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens + # stack user domain + # Note we have to pass token/endpoint here because the current endpoint and + # version negotiation in OSC means just --os-identity-api-version=3 won't work + KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" + D_ID=$(openstack --os-token $OS_SERVICE_TOKEN --os-url=$KS_ENDPOINT_V3 \ + --os-identity-api-version=3 domain show heat \ + | grep ' id ' | get_field 2) + iniset $HEAT_CONF stack_user_domain ${D_ID} + # paste_deploy [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_CONF paste_deploy flavor standalone @@ -196,6 +205,17 @@ function disk_image_create { upload_image "http://localhost/$output.qcow2" $TOKEN } +# create_heat_accounts() - Set up common required heat accounts +# Note this is in addition to what is in files/keystone_data.sh +function create_heat_accounts() { + # Note we have to pass token/endpoint here because the current endpoint and + # version negotiation in OSC means just --os-identity-api-version=3 won't work + KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" + openstack --os-token $OS_SERVICE_TOKEN --os-url=$KS_ENDPOINT_V3 \ + --os-identity-api-version=3 domain create heat \ + --description "Owns users and projects created by heat" +} + # Restore xtrace $XTRACE diff --git a/stack.sh b/stack.sh index c153132485..824982e4c6 100755 --- a/stack.sh +++ b/stack.sh @@ -938,6 +938,10 @@ if 
is_service_enabled key; then create_swift_accounts fi + if is_service_enabled heat; then + create_heat_accounts + fi + # ``keystone_data.sh`` creates services, admin and demo users, and roles. ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ From 351173624c4a3e24aa479c6ce5f557732bff40e7 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Mon, 17 Feb 2014 18:38:07 +0400 Subject: [PATCH 0152/4119] Improve savanna-dashboard installation * split configurations setting to one-per-line; * don't set SAVANNA_URL in horizon configs - we're now using endpoits keystone to find corresponding edpoint. Change-Id: I9497a511656a2f70e923b651c66c5ef2917a0939 --- lib/savanna-dashboard | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard index 7713a78637..691b23f6e8 100644 --- a/lib/savanna-dashboard +++ b/lib/savanna-dashboard @@ -37,8 +37,9 @@ SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient function configure_savanna_dashboard() { - echo -e "SAVANNA_URL = \"http://$SERVICE_HOST:8386/v1.1\"\nAUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py - echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)\nINSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py + echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py + echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)" >> $HORIZON_DIR/openstack_dashboard/settings.py + echo -e "INSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py if is_service_enabled neutron; then echo -e "SAVANNA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py From d8864feae93f898f043febf0b4734f0b61c602d4 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 17 Feb 
2014 11:00:42 -0600 Subject: [PATCH 0153/4119] Fix shocco errors Clean up comments to fix errors seen while processing with shocco Change-Id: I0e97ad27613313f03e47c107051ea93b115d4744 --- driver_certs/cinder_driver_cert.sh | 1 + functions | 7 ++++++- lib/apache | 4 ++-- lib/marconi | 3 ++- lib/stackforge | 5 +++-- tools/create_userrc.sh | 4 +--- tools/fixup_stuff.sh | 3 ++- 7 files changed, 17 insertions(+), 10 deletions(-) diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh index 99b2c8e899..e45b7f8736 100755 --- a/driver_certs/cinder_driver_cert.sh +++ b/driver_certs/cinder_driver_cert.sh @@ -16,6 +16,7 @@ # It also assumes default install location (/opt/stack/xxx) # to aid in debug, you should also verify that you've added # an output directory for screen logs: +# # SCREEN_LOGDIR=/opt/stack/screen-logs CERT_DIR=$(cd $(dirname "$0") && pwd) diff --git a/functions b/functions index 5eae7fe510..6979c6c155 100644 --- a/functions +++ b/functions @@ -2,10 +2,15 @@ # # The following variables are assumed to be defined by certain functions: # +# - ``DATABASE_BACKENDS`` # - ``ENABLED_SERVICES`` # - ``FILES`` # - ``GLANCE_HOSTPORT`` +# - ``REQUIREMENTS_DIR`` +# - ``STACK_USER`` # - ``TRACK_DEPENDS`` +# - ``UNDO_REQUIREMENTS`` +# # Include the common functions FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) @@ -45,7 +50,7 @@ function cleanup_tmp { # Updates the dependencies in project_dir from the # openstack/requirements global list before installing anything. 
# -# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR`` +# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS`` # setup_develop directory function setup_develop() { local project_dir=$1 diff --git a/lib/apache b/lib/apache index 8ae78b2181..0e5712f56b 100644 --- a/lib/apache +++ b/lib/apache @@ -4,8 +4,8 @@ # Dependencies: # # - ``functions`` file -# -``STACK_USER`` must be defined - +# - ``STACK_USER`` must be defined +# # lib/apache exports the following functions: # # - is_apache_enabled_service diff --git a/lib/marconi b/lib/marconi index 88312cb1bd..cc33aebd2b 100644 --- a/lib/marconi +++ b/lib/marconi @@ -2,7 +2,8 @@ # Install and start **Marconi** service # To enable a minimal set of Marconi services, add the following to localrc: -# enable_service marconi-server +# +# enable_service marconi-server # # Dependencies: # - functions diff --git a/lib/stackforge b/lib/stackforge index 718b818ff6..5fa4570b74 100644 --- a/lib/stackforge +++ b/lib/stackforge @@ -6,8 +6,9 @@ # This is appropriate for python libraries that release to pypi and are # expected to be used beyond OpenStack like, but are requirements # for core services in global-requirements. -# * wsme -# * pecan +# +# * wsme +# * pecan # # This is not appropriate for stackforge projects which are early stage # OpenStack tools diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index d9c93cc476..c4eb8d4581 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -54,9 +54,7 @@ $0 -P -C mytenant -u myuser -p mypass EOF } -if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,os-cacert:,help,debug -- "$@") -then - #parse error +if ! 
options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,os-cacert:,help,debug -- "$@"); then display_help exit 1 fi diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index a28e10ef2d..47b0cd10cd 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -70,7 +70,8 @@ if [[ -d $dir ]]; then fi # Ubuntu 12.04 -# ----- +# ------------ + # We can regularly get kernel crashes on the 12.04 default kernel, so attempt # to install a new kernel if [[ ${DISTRO} =~ (precise) ]]; then From b72235611d9659a49caf87b2cc89f05fce27a3e0 Mon Sep 17 00:00:00 2001 From: Daniel Salinas Date: Sun, 16 Feb 2014 18:57:20 -0600 Subject: [PATCH 0154/4119] Fixed missing backslash in lib/trove This is breaking the installation of trove with devstack Change-Id: I8b59d96072da47b8be5000eda835258654230b0f Closes-Bug: 1280915 --- lib/trove | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/trove b/lib/trove index 5e1bbd548d..6834149c64 100644 --- a/lib/trove +++ b/lib/trove @@ -86,7 +86,7 @@ create_trove_accounts() { --project $SERVICE_TENANT \ --user $TROVE_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - TROVE_SERVICE=$(openstack service create + TROVE_SERVICE=$(openstack service create \ trove \ --type=database \ --description="Trove Service" \ From 18d5c833d47e41c8c8dcd73f35268d6e2b43df5b Mon Sep 17 00:00:00 2001 From: Ryu Ishimoto Date: Wed, 19 Feb 2014 00:33:46 +0900 Subject: [PATCH 0155/4119] Remove provider router configuration To be compatible with the Icehouse release of MidoNet, the provider router configuration is removed from devstack since it is no longer necessary to configure it. 
Change-Id: I4be2d9bbf2c82fd375702cbb1d60c3277086134f Implements: blueprint remove-provider-router-config-for-midonet --- lib/neutron_plugins/midonet | 11 ++++++----- lib/neutron_thirdparty/midonet | 19 ++----------------- 2 files changed, 8 insertions(+), 22 deletions(-) diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index f95fcb75b9..dd3b2baeca 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -1,6 +1,10 @@ # Neutron MidoNet plugin # ---------------------- +MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet} +MIDONET_API_PORT=${MIDONET_API_PORT:-8080} +MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-api} + # Save trace setting MY_XTRACE=$(set +o | grep xtrace) set +o xtrace @@ -47,8 +51,8 @@ function neutron_plugin_configure_plugin_agent() { } function neutron_plugin_configure_service() { - if [[ "$MIDONET_API_URI" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE MIDONET midonet_uri $MIDONET_API_URI + if [[ "$MIDONET_API_URL" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE MIDONET midonet_uri $MIDONET_API_URL fi if [[ "$MIDONET_USERNAME" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE MIDONET username $MIDONET_USERNAME @@ -59,9 +63,6 @@ function neutron_plugin_configure_service() { if [[ "$MIDONET_PROJECT_ID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE MIDONET project_id $MIDONET_PROJECT_ID fi - if [[ "$MIDONET_PROVIDER_ROUTER_ID" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $MIDONET_PROVIDER_ROUTER_ID - fi Q_L3_ENABLED=True Q_L3_ROUTER_PER_TENANT=True diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet index e672528a2d..98be4254fc 100644 --- a/lib/neutron_thirdparty/midonet +++ b/lib/neutron_thirdparty/midonet @@ -10,20 +10,12 @@ # MidoNet devstack destination dir MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet} -MIDONET_API_PORT=${MIDONET_API_PORT:-8080} -MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-api} # MidoNet client repo 
MIDONET_CLIENT_REPO=${MIDONET_CLIENT_REPO:-https://github.com/midokura/python-midonetclient.git} MIDONET_CLIENT_BRANCH=${MIDONET_CLIENT_BRANCH:-master} MIDONET_CLIENT_DIR=${MIDONET_CLIENT_DIR:-$MIDONET_DIR/python-midonetclient} -# MidoNet OpenStack repo -MIDONET_OS_REPO=${MIDONET_OS_REPO:-https://github.com/midokura/midonet-openstack.git} -MIDONET_OS_BRANCH=${MIDONET_OS_BRANCH:-master} -MIDONET_OS_DIR=${MIDONET_OS_DIR:-$MIDONET_DIR/midonet-openstack} -MIDONET_SETUP_SCRIPT=${MIDONET_SETUP_SCRIPT:-$MIDONET_OS_DIR/bin/setup_midonet_topology.py} - # Save trace setting MY_XTRACE=$(set +o | grep xtrace) set +o xtrace @@ -33,19 +25,12 @@ function configure_midonet() { } function init_midonet() { - - # Initialize DB. Evaluate the output of setup_midonet_topology.py to set - # env variables for provider router ID. - eval `python $MIDONET_SETUP_SCRIPT $MIDONET_API_URL admin $ADMIN_PASSWORD admin provider_devices` - die_if_not_set $LINENO provider_router_id "Error running midonet setup script, provider_router_id was not set." - - iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $provider_router_id + : } function install_midonet() { git_clone $MIDONET_CLIENT_REPO $MIDONET_CLIENT_DIR $MIDONET_CLIENT_BRANCH - git_clone $MIDONET_OS_REPO $MIDONET_OS_DIR $MIDONET_OS_BRANCH - export PYTHONPATH=$MIDONET_CLIENT_DIR/src:$MIDONET_OS_DIR/src:$PYTHONPATH + export PYTHONPATH=$MIDONET_CLIENT_DIR/src:$PYTHONPATH } function start_midonet() { From 2dcc77422348e55b6f7028679647cfbdf872f6a2 Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Tue, 18 Feb 2014 13:45:18 -0500 Subject: [PATCH 0156/4119] Add retry to connect to mongo db This patch adds retries to connect to the mongodb, after a restart. 
Change-Id: I16e37614736c247fa0b737db2b868c052c2aa33a --- lib/marconi | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index 88312cb1bd..b6ce57a295 100644 --- a/lib/marconi +++ b/lib/marconi @@ -68,7 +68,9 @@ function is_marconi_enabled { # cleanup_marconi() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_marconi() { - mongo marconi --eval "db.dropDatabase();" + if ! timeout $SERVICE_TIMEOUT sh -c "while ! mongo marconi --eval 'db.dropDatabase();'; do sleep 1; done"; then + die $LINENO "Mongo DB did not start" + fi } # configure_marconiclient() - Set config files, create data dirs, etc From de2057290a368e339cb66a8a61d483c90f964089 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Wed, 19 Feb 2014 14:00:42 +0400 Subject: [PATCH 0157/4119] Improve savanna keystone auth configuration We're doing to use common keystone configuration approach - section keystone_authtoken with config opts from the python-keystoneclient auth_token middleware. 
Change-Id: Ibbe0c76ee3b00045f5cb5134bd7661e9cef6ccdd --- extras.d/70-savanna.sh | 5 +++++ lib/savanna | 29 +++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh index 6bbe113fa7..edc1376deb 100644 --- a/extras.d/70-savanna.sh +++ b/extras.d/70-savanna.sh @@ -8,6 +8,7 @@ if is_service_enabled savanna; then elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Savanna" install_savanna + cleanup_savanna if is_service_enabled horizon; then install_savanna_dashboard fi @@ -29,4 +30,8 @@ if is_service_enabled savanna; then cleanup_savanna_dashboard fi fi + + if [[ "$1" == "clean" ]]; then + cleanup_savanna + fi fi diff --git a/lib/savanna b/lib/savanna index 43c5e386fe..954f0e711e 100644 --- a/lib/savanna +++ b/lib/savanna @@ -10,6 +10,7 @@ # configure_savanna # start_savanna # stop_savanna +# cleanup_savanna # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -33,6 +34,8 @@ SAVANNA_SERVICE_HOST=${SAVANNA_SERVICE_HOST:-$SERVICE_HOST} SAVANNA_SERVICE_PORT=${SAVANNA_SERVICE_PORT:-8386} SAVANNA_SERVICE_PROTOCOL=${SAVANNA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +SAVANNA_AUTH_CACHE_DIR=${SAVANNA_AUTH_CACHE_DIR:-/var/cache/savanna} + # Support entry points installation of console scripts if [[ -d $SAVANNA_DIR/bin ]]; then SAVANNA_BIN_DIR=$SAVANNA_DIR/bin @@ -83,6 +86,14 @@ function create_savanna_accounts() { fi } +# cleanup_savanna() - Remove residual data files, anything left over from +# previous runs that would need to clean up. +function cleanup_savanna() { + + # Cleanup auth cache dir + sudo rm -rf $SAVANNA_AUTH_CACHE_DIR +} + # configure_savanna() - Set config files, create data dirs, etc function configure_savanna() { @@ -94,9 +105,27 @@ function configure_savanna() { # Copy over savanna configuration file and configure common parameters. 
cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_FILE + # Create auth cache dir + sudo mkdir -p $SAVANNA_AUTH_CACHE_DIR + sudo chown $STACK_USER $SAVANNA_AUTH_CACHE_DIR + rm -rf $SAVANNA_AUTH_CACHE_DIR/* + + # Set obsolete keystone auth configs for backward compatibility + iniset $SAVANNA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST + iniset $SAVANNA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT + iniset $SAVANNA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL iniset $SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD iniset $SAVANNA_CONF_FILE DEFAULT os_admin_username savanna iniset $SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME + + # Set actual keystone auth configs + iniset $SAVANNA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + iniset $SAVANNA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $SAVANNA_CONF_FILE keystone_authtoken admin_user savanna + iniset $SAVANNA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $SAVANNA_CONF_FILE keystone_authtoken signing_dir $SAVANNA_AUTH_CACHE_DIR + iniset $SAVANNA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA + iniset $SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG iniset $SAVANNA_CONF_FILE database connection `database_connection_url savanna` From 27f29440d1b6f5343e02b8beff04c21882139ce7 Mon Sep 17 00:00:00 2001 From: Brett Campbell Date: Wed, 19 Feb 2014 18:23:16 -0800 Subject: [PATCH 0158/4119] Set umask Ensure we have a known-good umask. Otherwise files such as /etc/polkit-1/rules.d/50-libvirt-$STACK_USER.rules may not be readable by non-root users afterwards. Also reworded some comments to be more clear. 
Change-Id: I7653d4eee062cf32df22aa158da6269b1aa9a558 Closes-Bug: #1265195 --- stack.sh | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/stack.sh b/stack.sh index 4a55225685..a5d66cc8e8 100755 --- a/stack.sh +++ b/stack.sh @@ -5,11 +5,12 @@ # **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron**, # and **Swift** -# This script allows you to specify configuration options of what git -# repositories to use, enabled services, network configuration and various -# passwords. If you are crafty you can run the script on multiple nodes using -# shared settings for common resources (mysql, rabbitmq) and build a multi-node -# developer install. +# This script's options can be changed by setting appropriate environment +# variables. You can configure things like which git repositories to use, +# services to enable, OS images to use, etc. Default values are located in the +# ``stackrc`` file. If you are crafty you can run the script on multiple nodes +# using shared settings for common resources (eg., mysql or rabbitmq) and build +# a multi-node developer install. # To keep this script simple we assume you are running on a recent **Ubuntu** # (12.04 Precise or newer) or **Fedora** (F18 or newer) machine. (It may work @@ -30,6 +31,9 @@ unset LANGUAGE LC_ALL=C export LC_ALL +# Make sure umask is sane +umask 022 + # Keep track of the devstack directory TOP_DIR=$(cd $(dirname "$0") && pwd) From f6368d3eaccc33d5afdbc53a34bf6e37b6e11eb8 Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Thu, 20 Feb 2014 13:31:26 +0900 Subject: [PATCH 0159/4119] Fix comments about System Functions This commit fixes comments about "System Functions". 
* Add a missing comment about System Functions in the header * Fix singular to plural like others Change-Id: I3feb94cd11a6683ca80093574d60fdf7420e3af2 --- functions-common | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index d92e39cd91..eb9b4ac8bb 100644 --- a/functions-common +++ b/functions-common @@ -15,6 +15,7 @@ # - Process Functions # - Python Functions # - Service Functions +# - System Functions # # The following variables are assumed to be defined by certain functions: # @@ -1280,8 +1281,8 @@ function use_exclusive_service { } -# System Function -# =============== +# System Functions +# ================ # Only run the command if the target file (the last arg) is not on an # NFS filesystem. From 1958c1eb5e3521a70a3cf4185a177da7d17d83e9 Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Thu, 20 Feb 2014 14:32:15 +0900 Subject: [PATCH 0160/4119] Remove unnecessary comment out lines This commit removes some comment-outed codes. If we want to use them, we can get them from the git repository. Change-Id: Ie438c43d332d0631750f0ad458653fc40e23faad --- clean.sh | 9 --------- tools/info.sh | 2 -- tools/xen/build_domU_multi.sh | 6 ------ 3 files changed, 17 deletions(-) diff --git a/clean.sh b/clean.sh index 09f08dc8c2..b2a9405c88 100755 --- a/clean.sh +++ b/clean.sh @@ -101,11 +101,6 @@ if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; th cleanup_nova_hypervisor fi -#if mount | grep $DATA_DIR/swift/drives; then -# sudo umount $DATA_DIR/swift/drives/sdb1 -#fi - - # Clean out /etc sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift @@ -123,9 +118,5 @@ if [[ -n "$SCREEN_LOGDIR" ]] && [[ -d "$SCREEN_LOGDIR" ]]; then sudo rm -rf $SCREEN_LOGDIR fi -# Clean up networking... -# should this be in nova? 
-# FIXED_IP_ADDR in br100 - # Clean up files rm -f $TOP_DIR/.stackenv diff --git a/tools/info.sh b/tools/info.sh index 3ab7966ab4..1e521b9c4b 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -122,13 +122,11 @@ while read line; do ver=${BASH_REMATCH[2]} else # Unhandled format in freeze file - #echo "unknown: $p" continue fi echo "pip|${p}|${ver}" else # No match in freeze file - #echo "unknown: $p" continue fi done <$FREEZE_FILE diff --git a/tools/xen/build_domU_multi.sh b/tools/xen/build_domU_multi.sh index 0285f42e42..0eb2077414 100755 --- a/tools/xen/build_domU_multi.sh +++ b/tools/xen/build_domU_multi.sh @@ -25,11 +25,5 @@ function build_xva { # because rabbit won't launch with an ip addr hostname :( build_xva HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit" -# Wait till the head node is up -#while ! curl -L http://$HEAD_PUB_IP | grep -q username; do -# echo "Waiting for head node ($HEAD_PUB_IP) to start..." -# sleep 5 -#done - # Build the HA compute host build_xva COMPUTENODE $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api" From 3d60f4dd531388cd01a3aa689053dfc22acbd16c Mon Sep 17 00:00:00 2001 From: Giulio Fidente Date: Thu, 20 Feb 2014 16:43:49 +0100 Subject: [PATCH 0161/4119] Disable tempest backup tests if c-bak unavailable This will update the tempest config to not run the cinder backup tests when the c-bak service is not enabled. Change-Id: I0b6486f1222afa7ae9bd9d13c7d3648d2b870710 --- lib/tempest | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index c8eebfcf05..596750b32f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -314,8 +314,8 @@ function configure_tempest() { iniset $TEMPEST_CONFIG scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} # Volume - if is_service_enabled c-bak; then - iniset $TEMPEST_CONFIG volume volume_backup_enabled "True" + if ! 
is_service_enabled c-bak; then + iniset $TEMPEST_CONFIG volume-feature-enabled backup False fi CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) if [ $CINDER_MULTI_LVM_BACKEND == "True" ]; then From 2d65059e725ad27d1e9bdddbea9982d1d8027c01 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 20 Feb 2014 15:49:13 +0100 Subject: [PATCH 0162/4119] Add RHEL7 beta support RHEL7 still in beta status, so it will require the FORCE option, until the GA release. The main notable difference from another RHEL family members, it does not have the mysql alias for the mariadb. Change-Id: Ic90bb6c3dd9447fc80453c3dc1adb22cdfc6226f --- files/rpms/cinder | 2 +- files/rpms/glance | 4 ++-- files/rpms/neutron | 4 ++-- files/rpms/nova | 8 ++++---- files/rpms/swift | 2 +- lib/databases/mysql | 18 +++++++++++++++--- 6 files changed, 25 insertions(+), 13 deletions(-) diff --git a/files/rpms/cinder b/files/rpms/cinder index 623c13e676..199ae10b79 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -4,4 +4,4 @@ qemu-img python-devel postgresql-devel iscsi-initiator-utils -python-lxml #dist:f18,f19,f20 +python-lxml #dist:f18,f19,f20,rhel7 diff --git a/files/rpms/glance b/files/rpms/glance index fffd9c85b4..785ce25df5 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -9,8 +9,8 @@ python-argparse python-devel python-eventlet python-greenlet -python-lxml #dist:f18,f19,f20 -python-paste-deploy #dist:f18,f19,f20 +python-lxml #dist:f18,f19,f20,rhel7 +python-paste-deploy #dist:f18,f19,f20,rhel7 python-routes python-sqlalchemy python-wsgiref diff --git a/files/rpms/neutron b/files/rpms/neutron index 67bf52350a..42d7f68d37 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -11,8 +11,8 @@ python-greenlet python-iso8601 python-kombu #rhel6 gets via pip -python-paste # dist:f18,f19,f20 -python-paste-deploy # dist:f18,f19,f20 +python-paste # dist:f18,f19,f20,rhel7 +python-paste-deploy # dist:f18,f19,f20,rhel7 python-qpid python-routes python-sqlalchemy diff 
--git a/files/rpms/nova b/files/rpms/nova index ac70ac5d6f..a607d925e1 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -28,11 +28,11 @@ python-kombu python-lockfile python-migrate python-mox -python-paramiko # dist:f18,f19,f20 -# ^ on RHEL, brings in python-crypto which conflicts with version from +python-paramiko # dist:f18,f19,f20,rhel7 +# ^ on RHEL6, brings in python-crypto which conflicts with version from # pip we need -python-paste # dist:f18,f19,f20 -python-paste-deploy # dist:f18,f19,f20 +python-paste # dist:f18,f19,f20,rhel7 +python-paste-deploy # dist:f18,f19,f20,rhel7 python-qpid python-routes python-sqlalchemy diff --git a/files/rpms/swift b/files/rpms/swift index 32432bca9b..72253f7752 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -9,7 +9,7 @@ python-eventlet python-greenlet python-netifaces python-nose -python-paste-deploy # dist:f18,f19,f20 +python-paste-deploy # dist:f18,f19,f20,rhel7 python-simplejson python-webob pyxattr diff --git a/lib/databases/mysql b/lib/databases/mysql index 476b4b91b7..31e7163033 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -25,7 +25,11 @@ function cleanup_database_mysql { sudo rm -rf /var/lib/mysql return elif is_fedora; then - MYSQL=mysqld + if [[ $DISTRO =~ (rhel7) ]]; then + MYSQL=mariadb + else + MYSQL=mysqld + fi elif is_suse; then MYSQL=mysql else @@ -48,8 +52,12 @@ function configure_database_mysql { MY_CONF=/etc/mysql/my.cnf MYSQL=mysql elif is_fedora; then + if [[ $DISTRO =~ (rhel7) ]]; then + MYSQL=mariadb + else + MYSQL=mysqld + fi MY_CONF=/etc/my.cnf - MYSQL=mysqld elif is_suse; then MY_CONF=/etc/my.cnf MYSQL=mysql @@ -135,7 +143,11 @@ EOF fi # Install mysql-server if is_ubuntu || is_fedora; then - install_package mysql-server + if [[ $DISTRO =~ (rhel7) ]]; then + install_package mariadb-server + else + install_package mysql-server + fi elif is_suse; then if ! 
is_package_installed mariadb; then install_package mysql-community-server From 09bb9e67923c1de4d4479000eb329b139732c57b Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 21 Feb 2014 14:33:29 +1100 Subject: [PATCH 0163/4119] Add more files to run_tests.sh bash8 check Add functions-common, stackrc, openrc, exerciserc, eucarc to bash8 checks Change-Id: Ic14b348c871bf98bf35c7e866e715bb75bdccf97 --- run_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run_tests.sh b/run_tests.sh index 9d9d18661e..b4f26c5709 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -20,7 +20,7 @@ if [[ -n $@ ]]; then else LIBS=`find lib -type f | grep -v \.md` SCRIPTS=`find . -type f -name \*\.sh` - EXTRA="functions" + EXTRA="functions functions-common stackrc openrc exerciserc eucarc" FILES="$SCRIPTS $LIBS $EXTRA" fi From f8e86bb3129c6aa5cb9c70ceb2a55f01b2dd1bf0 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 21 Feb 2014 15:16:31 +1100 Subject: [PATCH 0164/4119] Un-nest generate_swift_config I think this got accidentally nested during some code refactorizing? 
Change-Id: Ie486cf3395b6acf3a10eb32e116d39ca56134b9f --- lib/swift | 79 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 40 insertions(+), 39 deletions(-) diff --git a/lib/swift b/lib/swift index df586abe8b..6c33af5082 100644 --- a/lib/swift +++ b/lib/swift @@ -231,6 +231,46 @@ function _config_swift_apache_wsgi() { done } +# This function generates an object/container/account configuration +# emulating 4 nodes on different ports +function generate_swift_config() { + local swift_node_config=$1 + local node_id=$2 + local bind_port=$3 + local server_type=$4 + + log_facility=$[ node_id - 1 ] + node_path=${SWIFT_DATA_DIR}/${node_number} + + iniuncomment ${swift_node_config} DEFAULT user + iniset ${swift_node_config} DEFAULT user ${STACK_USER} + + iniuncomment ${swift_node_config} DEFAULT bind_port + iniset ${swift_node_config} DEFAULT bind_port ${bind_port} + + iniuncomment ${swift_node_config} DEFAULT swift_dir + iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONF_DIR} + + iniuncomment ${swift_node_config} DEFAULT devices + iniset ${swift_node_config} DEFAULT devices ${node_path} + + iniuncomment ${swift_node_config} DEFAULT log_facility + iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility} + + iniuncomment ${swift_node_config} DEFAULT workers + iniset ${swift_node_config} DEFAULT workers 1 + + iniuncomment ${swift_node_config} DEFAULT disable_fallocate + iniset ${swift_node_config} DEFAULT disable_fallocate true + + iniuncomment ${swift_node_config} DEFAULT mount_check + iniset ${swift_node_config} DEFAULT mount_check false + + iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode + iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes +} + + # configure_swift() - Set config files, create data dirs and loop image function configure_swift() { local swift_pipeline="${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH}" @@ -364,45 +404,6 @@ EOF cp ${SWIFT_DIR}/etc/swift.conf-sample 
${SWIFT_CONF_DIR}/swift.conf iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH} - # This function generates an object/container/account configuration - # emulating 4 nodes on different ports - function generate_swift_config() { - local swift_node_config=$1 - local node_id=$2 - local bind_port=$3 - local server_type=$4 - - log_facility=$[ node_id - 1 ] - node_path=${SWIFT_DATA_DIR}/${node_number} - - iniuncomment ${swift_node_config} DEFAULT user - iniset ${swift_node_config} DEFAULT user ${STACK_USER} - - iniuncomment ${swift_node_config} DEFAULT bind_port - iniset ${swift_node_config} DEFAULT bind_port ${bind_port} - - iniuncomment ${swift_node_config} DEFAULT swift_dir - iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONF_DIR} - - iniuncomment ${swift_node_config} DEFAULT devices - iniset ${swift_node_config} DEFAULT devices ${node_path} - - iniuncomment ${swift_node_config} DEFAULT log_facility - iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility} - - iniuncomment ${swift_node_config} DEFAULT workers - iniset ${swift_node_config} DEFAULT workers 1 - - iniuncomment ${swift_node_config} DEFAULT disable_fallocate - iniset ${swift_node_config} DEFAULT disable_fallocate true - - iniuncomment ${swift_node_config} DEFAULT mount_check - iniset ${swift_node_config} DEFAULT mount_check false - - iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode - iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes - } - for node_number in ${SWIFT_REPLICAS_SEQ}; do swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config} From 8e1a1ffdfbf59e01688fd2e6e007ab72d49263ed Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Fri, 21 Feb 2014 14:45:48 +0000 Subject: [PATCH 0165/4119] Set stack_user_domain config correctly The recently merged patch which creates a domain for heat fails to correctly set the domain ID in 
heat.conf, so move the setting of the config option to immediately after we create the domain. Also add the missing DEFAULT section identifier in the iniset, and use OS_TOKEN instead of OS_SERVICE token, because the stack.sh comment says this is exported for the openstackclient workaround. Change-Id: I912f774f1215d68cbcfe44229b371f318d92966a Closes-Bug: #1283075 --- lib/heat | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/lib/heat b/lib/heat index efb01ef3b8..af10fa6f1d 100644 --- a/lib/heat +++ b/lib/heat @@ -110,15 +110,6 @@ function configure_heat() { iniset $HEAT_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 iniset $HEAT_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens - # stack user domain - # Note we have to pass token/endpoint here because the current endpoint and - # version negotiation in OSC means just --os-identity-api-version=3 won't work - KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" - D_ID=$(openstack --os-token $OS_SERVICE_TOKEN --os-url=$KS_ENDPOINT_V3 \ - --os-identity-api-version=3 domain show heat \ - | grep ' id ' | get_field 2) - iniset $HEAT_CONF stack_user_domain ${D_ID} - # paste_deploy [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_CONF paste_deploy flavor standalone @@ -211,9 +202,11 @@ function create_heat_accounts() { # Note we have to pass token/endpoint here because the current endpoint and # version negotiation in OSC means just --os-identity-api-version=3 won't work KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" - openstack --os-token $OS_SERVICE_TOKEN --os-url=$KS_ENDPOINT_V3 \ + D_ID=$(openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \ --os-identity-api-version=3 domain create heat \ - --description "Owns users and projects created by heat" + --description 
"Owns users and projects created by heat" \ + | grep ' id ' | get_field 2) + iniset $HEAT_CONF DEFAULT stack_user_domain ${D_ID} } # Restore xtrace From f2ca87a8d8ded80384b2cafb46ef2ca4cf19a986 Mon Sep 17 00:00:00 2001 From: Rabi Mishra Date: Fri, 21 Feb 2014 20:08:28 +0530 Subject: [PATCH 0166/4119] Implements fix to run lbaas service on fedora with devstack changes 'user_group = nobody' in 'haproxy' section of lbaas_agent.ini Change-Id: I801fec5a11d8abd97cb6f5cdff35fabb9eaf9000 Closes-Bug: 1283064 --- lib/neutron_plugins/services/loadbalancer | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer index 5d7a94e5d8..3714142a83 100644 --- a/lib/neutron_plugins/services/loadbalancer +++ b/lib/neutron_plugins/services/loadbalancer @@ -38,6 +38,7 @@ function neutron_agent_lbaas_configure_agent() { if is_fedora; then iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody" + iniset $LBAAS_AGENT_CONF_FILENAME haproxy user_group "nobody" fi } From 67df3b2fc2b2e7b1cfb0418e59f96db7561277be Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Thu, 20 Feb 2014 14:48:59 -0500 Subject: [PATCH 0167/4119] Bind Marconi to SERVICE_HOST & add health check This patch, 1. Binds Marconi to SERVICE_HOST, to be consistent with other services. 2. Adds a health check to verify if marconi started correctly. 
Change-Id: I1d48d0e610369cc97d479a5cd47b2bd11656da3f --- lib/marconi | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/lib/marconi b/lib/marconi index b6ce57a295..ee7bf0ec7b 100644 --- a/lib/marconi +++ b/lib/marconi @@ -51,6 +51,11 @@ MARCONI_BRANCH=${MARCONI_BRANCH:-master} MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconiclient.git} MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master} +# Set Marconi Connection Info +MARCONI_SERVICE_HOST=${MARCONI_SERVICE_HOST:-$SERVICE_HOST} +MARCONI_SERVICE_PORT=${MARCONI_SERVICE_PORT:-8888} +MARCONI_SERVICE_PROTOCOL=${MARCONI_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} + # Tell Tempest this project is present TEMPEST_SERVICES+=,marconi @@ -89,7 +94,7 @@ function configure_marconi() { sudo chown $USER $MARCONI_API_LOG_DIR iniset $MARCONI_CONF DEFAULT verbose True - iniset $MARCONI_CONF 'drivers:transport:wsgi' bind '0.0.0.0' + iniset $MARCONI_CONF 'drivers:transport:wsgi' bind $MARCONI_SERVICE_HOST iniset $MARCONI_CONF keystone_authtoken auth_protocol http iniset $MARCONI_CONF keystone_authtoken admin_user marconi @@ -142,6 +147,10 @@ function install_marconiclient() { # start_marconi() - Start running processes, including screen function start_marconi() { screen_it marconi-server "marconi-server --config-file $MARCONI_CONF" + echo "Waiting for Marconi to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then + die $LINENO "Marconi did not start" + fi } # stop_marconi() - Stop running processes @@ -176,9 +185,9 @@ function create_marconi_accounts() { openstack endpoint create \ $MARCONI_SERVICE \ --region RegionOne \ - --publicurl "http://$SERVICE_HOST:8888" \ - --adminurl "http://$SERVICE_HOST:8888" \ - --internalurl "http://$SERVICE_HOST:8888" + --publicurl "$MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT" \ + --adminurl "$MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT" \ + --internalurl "$MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT" fi } From f5aa05c0ab1e1ae0c9f56d5eaf9164adcd4cd7b9 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Fri, 21 Feb 2014 22:03:59 -0500 Subject: [PATCH 0168/4119] Add support for oslo.vmware Change-Id: I2162a339b1869c27850afcda6be3c4e11de94e0e --- lib/oslo | 4 ++++ stackrc | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/lib/oslo b/lib/oslo index b089842ae4..516ce1c3a9 100644 --- a/lib/oslo +++ b/lib/oslo @@ -24,6 +24,7 @@ CLIFF_DIR=$DEST/cliff OSLOCFG_DIR=$DEST/oslo.config OSLOMSG_DIR=$DEST/oslo.messaging OSLORWRAP_DIR=$DEST/oslo.rootwrap +OSLOVMWARE_DIR=$DEST/oslo.vmware PYCADF_DIR=$DEST/pycadf STEVEDORE_DIR=$DEST/stevedore TASKFLOW_DIR=$DEST/taskflow @@ -49,6 +50,9 @@ function install_oslo() { git_clone $OSLORWRAP_REPO $OSLORWRAP_DIR $OSLORWRAP_BRANCH setup_develop $OSLORWRAP_DIR + git_clone $OSLOVMWARE_REPO $OSLOVMWARE_DIR $OSLOVMWARE_BRANCH + setup_develop $OSLOVMWARE_DIR + git_clone $PYCADF_REPO $PYCADF_DIR $PYCADF_BRANCH setup_develop $PYCADF_DIR diff --git a/stackrc b/stackrc index 0b081c4014..91f5751966 100644 --- a/stackrc +++ b/stackrc @@ -167,6 +167,10 @@ OSLOMSG_BRANCH=${OSLOMSG_BRANCH:-master} OSLORWRAP_REPO=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git} OSLORWRAP_BRANCH=${OSLORWRAP_BRANCH:-master} +# 
oslo.vmware +OSLOVMWARE_REPO=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git} +OSLOVMWARE_BRANCH=${OSLOVMWARE_BRANCH:-master} + # pycadf auditing library PYCADF_REPO=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git} PYCADF_BRANCH=${PYCADF_BRANCH:-master} From d53ad0b07d3e7bdd2668c2d3f1815d95d4b8f532 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 20 Feb 2014 13:55:13 +1100 Subject: [PATCH 0169/4119] Add GIT_TIMEOUT variable to watch git operations During my CI testing of each devstack change I can often see git get itself stuck and hang indefinitely. I'm not sure if it's transient network issues, or issues at the remote end (seen with both github.com and git.openstack.org) but it hits fairly frequently. Retrying the command usually gets it going again. Searching for "git hanging" and similar shows its not entirely uncommon... This adds a watchdog timeout for remote git operations based on a new environment variable GIT_TIMEOUT. It will retry 3 times before giving up. The wrapper is applied to the main remote git calls. Change-Id: I5b0114ca26b7ac2f25993264f761cba9ec8c09e1 --- functions-common | 41 ++++++++++++++++++++++++++++++++++++----- stackrc | 11 +++++++++++ 2 files changed, 47 insertions(+), 5 deletions(-) diff --git a/functions-common b/functions-common index d92e39cd91..9cd5acd47b 100644 --- a/functions-common +++ b/functions-common @@ -498,16 +498,16 @@ function git_clone { if [[ ! -d $GIT_DEST ]]; then [[ "$ERROR_ON_CLONE" = "True" ]] && \ die $LINENO "Cloning not allowed in this configuration" - git clone $GIT_REMOTE $GIT_DEST + git_timed clone $GIT_REMOTE $GIT_DEST fi cd $GIT_DEST - git fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD + git_timed fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD else # do a full clone only if the directory doesn't exist if [[ ! 
-d $GIT_DEST ]]; then [[ "$ERROR_ON_CLONE" = "True" ]] && \ die $LINENO "Cloning not allowed in this configuration" - git clone $GIT_REMOTE $GIT_DEST + git_timed clone $GIT_REMOTE $GIT_DEST cd $GIT_DEST # This checkout syntax works for both branches and tags git checkout $GIT_REF @@ -516,7 +516,7 @@ function git_clone { cd $GIT_DEST # set the url to pull from and fetch git remote set-url origin $GIT_REMOTE - git fetch origin + git_timed fetch origin # remove the existing ignored files (like pyc) as they cause breakage # (due to the py files having older timestamps than our pyc, so python # thinks the pyc files are correct using them) @@ -541,6 +541,37 @@ function git_clone { git show --oneline | head -1 } +# git can sometimes get itself infinitely stuck with transient network +# errors or other issues with the remote end. This wraps git in a +# timeout/retry loop and is intended to watch over non-local git +# processes that might hang. GIT_TIMEOUT, if set, is passed directly +# to timeout(1); otherwise the default value of 0 maintains the status +# quo of waiting forever. +# usage: git_timed +function git_timed() { + local count=0 + local timeout=0 + + if [[ -n "${GIT_TIMEOUT}" ]]; then + timeout=${GIT_TIMEOUT} + fi + + until timeout -s SIGINT ${timeout} git "$@"; do + # 124 is timeout(1)'s special return code when it reached the + # timeout; otherwise assume fatal failure + if [[ $? -ne 124 ]]; then + die $LINENO "git call failed: [git $@]" + fi + + count=$(($count + 1)) + warn "timeout ${count} for git call: [git $@]" + if [ $count -eq 3 ]; then + die $LINENO "Maximum of 3 git retries reached" + fi + sleep 5 + done +} + # git update using reference as a branch. 
# git_update_branch ref function git_update_branch() { @@ -571,7 +602,7 @@ function git_update_tag() { git tag -d $GIT_TAG # fetching given tag only - git fetch origin tag $GIT_TAG + git_timed fetch origin tag $GIT_TAG git checkout -f $GIT_TAG } diff --git a/stackrc b/stackrc index 56fa40269c..8cec09eb28 100644 --- a/stackrc +++ b/stackrc @@ -69,6 +69,17 @@ fi # (currently only implemented for MySQL backend) DATABASE_QUERY_LOGGING=$(trueorfalse True $DATABASE_QUERY_LOGGING) +# Set a timeout for git operations. If git is still running when the +# timeout expires, the command will be retried up to 3 times. This is +# in the format for timeout(1); +# +# DURATION is a floating point number with an optional suffix: 's' +# for seconds (the default), 'm' for minutes, 'h' for hours or 'd' +# for days. +# +# Zero disables timeouts +GIT_TIMEOUT=${GIT_TIMEOUT:-0} + # Repositories # ------------ From b93ee25b64de5d587c2e0889a9ce689c92aaa0f9 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Sun, 23 Feb 2014 20:41:07 -0500 Subject: [PATCH 0170/4119] make bash8 take a -v flag this ensures that we actually know we are processing all the files we believe we are. Change-Id: I8e99b5f9dc987c946586475f374f7040ca63a478 --- run_tests.sh | 2 +- tools/bash8.py | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index b4f26c5709..a0bfbee0c0 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -26,4 +26,4 @@ fi echo "Running bash8..." 
-./tools/bash8.py $FILES +./tools/bash8.py -v $FILES diff --git a/tools/bash8.py b/tools/bash8.py index 7552e0d642..ca0abd964a 100755 --- a/tools/bash8.py +++ b/tools/bash8.py @@ -110,11 +110,13 @@ def end_of_multiline(line, token): return False -def check_files(files): +def check_files(files, verbose): in_multiline = False logical_line = "" token = False for line in fileinput.input(files): + if verbose and fileinput.isfirstline(): + print "Running bash8 on %s" % fileinput.filename() # NOTE(sdague): multiline processing of heredocs is interesting if not in_multiline: logical_line = line @@ -141,13 +143,14 @@ def get_options(): parser.add_argument('files', metavar='file', nargs='+', help='files to scan for errors') parser.add_argument('-i', '--ignore', help='Rules to ignore') + parser.add_argument('-v', '--verbose', action='store_true', default=False) return parser.parse_args() def main(): opts = get_options() register_ignores(opts.ignore) - check_files(opts.files) + check_files(opts.files, opts.verbose) if ERRORS > 0: print("%d bash8 error(s) found" % ERRORS) From 010959de403660e13eca54c6ef306ef5df24b436 Mon Sep 17 00:00:00 2001 From: Jim Rollenhagen Date: Tue, 18 Feb 2014 13:17:58 -0600 Subject: [PATCH 0171/4119] Perform safety checks in create-stack-user.sh This adds some safety checks to the stack user creation script. This includes: - Using set -o errexit to exit early on errors - Make sure STACK_USER is set before doing anything with it Change-Id: If027daddd03e32c5ba3c2ebb05ad5b27d2868b0a --- tools/create-stack-user.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh index 50f6592a3a..9c29ecd901 100755 --- a/tools/create-stack-user.sh +++ b/tools/create-stack-user.sh @@ -15,6 +15,7 @@ # and it was time for this nonsense to stop. Run this script as root to create # the user and configure sudo. +set -o errexit # Keep track of the devstack directory TOP_DIR=$(cd $(dirname "$0")/.. 
&& pwd) @@ -27,12 +28,14 @@ source $TOP_DIR/functions # and ``DISTRO`` GetDistro -# Needed to get ``ENABLED_SERVICES`` +# Needed to get ``ENABLED_SERVICES`` and ``STACK_USER`` source $TOP_DIR/stackrc # Give the non-root user the ability to run as **root** via ``sudo`` is_package_installed sudo || install_package sudo +[[ -z "$STACK_USER" ]] && die "STACK_USER is not set. Exiting." + if ! getent group $STACK_USER >/dev/null; then echo "Creating a group called $STACK_USER" groupadd $STACK_USER From e9648276a6396a630d0eca812e36fc82ec4b2a0c Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Sun, 23 Feb 2014 18:55:51 +0100 Subject: [PATCH 0172/4119] Removes the dependence with aptitude Removes the dependence with aptitude by replacing the call of: aptitude purge -y ~npackage by apt_get purge -y package* Change-Id: I08875ffad9dc6293047827666f02453a355b16ea Closes-Bug: 1281410 --- lib/databases/mysql | 2 +- lib/databases/postgresql | 2 +- lib/rpc_backend | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 476b4b91b7..3c002f7c43 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -21,7 +21,7 @@ function cleanup_database_mysql { if is_ubuntu; then # Get ruthless with mysql stop_service $MYSQL - sudo aptitude purge -y ~nmysql-server + apt_get purge -y mysql* sudo rm -rf /var/lib/mysql return elif is_fedora; then diff --git a/lib/databases/postgresql b/lib/databases/postgresql index c459feb9e0..96a5947a60 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -21,7 +21,7 @@ function cleanup_database_postgresql { stop_service postgresql if is_ubuntu; then # Get ruthless with mysql - sudo aptitude purge -y ~npostgresql + apt_get purge -y postgresql* return elif is_fedora; then uninstall_package postgresql-server diff --git a/lib/rpc_backend b/lib/rpc_backend index 3651bc0d20..34f576f5b8 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -67,7 +67,7 @@ function 
cleanup_rpc_backend { sudo killall epmd || sudo killall -9 epmd if is_ubuntu; then # And the Erlang runtime too - sudo aptitude purge -y ~nerlang + apt_get purge -y erlang* fi elif is_service_enabled qpid; then if is_fedora; then From 2e2b28b531e392ac59fdfa948bc79a0c74b2f332 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 19 Feb 2014 09:02:02 -0500 Subject: [PATCH 0173/4119] reset prereqs status on clean.sh when running a clean, we should really reset the prereq status as well, as this should start us back from zeroish. Change-Id: I5fae151ab13bcf7fb82feb1e91eed19e0215dc59 --- clean.sh | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/clean.sh b/clean.sh index 09f08dc8c2..465b304a17 100755 --- a/clean.sh +++ b/clean.sh @@ -128,4 +128,10 @@ fi # FIXED_IP_ADDR in br100 # Clean up files -rm -f $TOP_DIR/.stackenv + +FILES_TO_CLEAN=".localrc.auto docs-files docs/ shocco/ stack-screenrc test*.conf* test.ini*" +FILES_TO_CLEAN+=".stackenv .prereqs" + +for file in FILES_TO_CLEAN; do + rm -f $TOP_DIR/$file +done From f1eb0475d9320875f1a6c4a9c398e9388350d206 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 11 Feb 2014 17:28:56 -0500 Subject: [PATCH 0174/4119] don't check for service enabled service enabled is something that's not required for sysstat and friends, because instead we actually can do this with screen_it. Change-Id: I4aa5787101cb0def46690f38a7f82effbb85f502 --- stack.sh | 52 +++++++++++++++++++++++----------------------------- 1 file changed, 23 insertions(+), 29 deletions(-) diff --git a/stack.sh b/stack.sh index 4a55225685..ce19b8fc5c 100755 --- a/stack.sh +++ b/stack.sh @@ -863,42 +863,36 @@ fi init_service_check -# Sysstat +# Sysstat and friends # ------- # If enabled, systat has to start early to track OpenStack service startup. 
-if is_service_enabled sysstat; then - # what we want to measure - # -u : cpu statitics - # -q : load - # -b : io load rates - # -w : process creation and context switch rates - SYSSTAT_OPTS="-u -q -b -w" - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it sysstat "cd $TOP_DIR; ./tools/sar_filter.py $SYSSTAT_OPTS -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" - else - screen_it sysstat "./tools/sar_filter.py $SYSSTAT_OPTS $SYSSTAT_INTERVAL" - fi +# what we want to measure +# -u : cpu statitics +# -q : load +# -b : io load rates +# -w : process creation and context switch rates +SYSSTAT_OPTS="-u -q -b -w" +if [[ -n ${SCREEN_LOGDIR} ]]; then + screen_it sysstat "cd $TOP_DIR; ./tools/sar_filter.py $SYSSTAT_OPTS -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" +else + screen_it sysstat "./tools/sar_filter.py $SYSSTAT_OPTS $SYSSTAT_INTERVAL" fi -if is_service_enabled dstat; then - # Per-process stats - DSTAT_OPTS="-tcndylp --top-cpu-adv" - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE" - else - screen_it dstat "dstat $DSTAT_OPTS" - fi +# A better kind of sysstat, with the top process per time slice +DSTAT_OPTS="-tcndylp --top-cpu-adv" +if [[ -n ${SCREEN_LOGDIR} ]]; then + screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE" +else + screen_it dstat "dstat $DSTAT_OPTS" fi -if is_service_enabled pidstat; then - # Per-process stats - PIDSTAT_OPTS="-l -p ALL -T ALL" - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it pidstat "cd $TOP_DIR; pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL > $SCREEN_LOGDIR/$PIDSTAT_FILE" - else - screen_it pidstat "pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL" - fi +# Per-process stats +PIDSTAT_OPTS="-l -p ALL -T ALL" +if [[ -n ${SCREEN_LOGDIR} ]]; then + screen_it pidstat "cd $TOP_DIR; pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL > $SCREEN_LOGDIR/$PIDSTAT_FILE" +else + screen_it pidstat "pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL" fi From 
af616d93411a9a446ce0d2e72ea4fb7d281cd940 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 17 Feb 2014 12:57:55 -0600 Subject: [PATCH 0175/4119] Move setup_develop() to common It's in the wrong place for current Grenade Change-Id: Ia670198332af5945a56d708cd83d9239df0c2287 --- functions | 54 ------------------------------------------------ functions-common | 52 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 54 deletions(-) diff --git a/functions b/functions index 6979c6c155..3101111c63 100644 --- a/functions +++ b/functions @@ -44,60 +44,6 @@ function cleanup_tmp { } -# ``pip install -e`` the package, which processes the dependencies -# using pip before running `setup.py develop` -# -# Updates the dependencies in project_dir from the -# openstack/requirements global list before installing anything. -# -# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS`` -# setup_develop directory -function setup_develop() { - local project_dir=$1 - - echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" - - # Don't update repo if local changes exist - # Don't use buggy "git diff --quiet" - (cd $project_dir && git diff --exit-code >/dev/null) - local update_requirements=$? - - if [ $update_requirements -eq 0 ]; then - (cd $REQUIREMENTS_DIR; \ - $SUDO_CMD python update.py $project_dir) - fi - - setup_develop_no_requirements_update $project_dir - - # We've just gone and possibly modified the user's source tree in an - # automated way, which is considered bad form if it's a development - # tree because we've screwed up their next git checkin. So undo it. - # - # However... there are some circumstances, like running in the gate - # where we really really want the overridden version to stick. 
So provide - # a variable that tells us whether or not we should UNDO the requirements - # changes (this will be set to False in the OpenStack ci gate) - if [ $UNDO_REQUIREMENTS = "True" ]; then - if [ $update_requirements -eq 0 ]; then - (cd $project_dir && git reset --hard) - fi - fi -} - - -# ``pip install -e`` the package, which processes the dependencies -# using pip before running `setup.py develop` -# Uses globals ``STACK_USER`` -# setup_develop_no_requirements_update directory -function setup_develop_no_requirements_update() { - local project_dir=$1 - - pip_install -e $project_dir - # ensure that further actions can do things like setup.py sdist - safe_chown -R $STACK_USER $1/*.egg-info -} - - # Retrieve an image from a URL and upload into Glance. # Uses the following variables: # diff --git a/functions-common b/functions-common index d92e39cd91..d6f71b4825 100644 --- a/functions-common +++ b/functions-common @@ -1130,6 +1130,58 @@ function pip_install { && $SUDO_PIP rm -rf ${pip_build_tmp} } +# ``pip install -e`` the package, which processes the dependencies +# using pip before running `setup.py develop` +# +# Updates the dependencies in project_dir from the +# openstack/requirements global list before installing anything. +# +# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS`` +# setup_develop directory +function setup_develop() { + local project_dir=$1 + + echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" + + # Don't update repo if local changes exist + # Don't use buggy "git diff --quiet" + (cd $project_dir && git diff --exit-code >/dev/null) + local update_requirements=$? 
+ + if [ $update_requirements -eq 0 ]; then + (cd $REQUIREMENTS_DIR; \ + $SUDO_CMD python update.py $project_dir) + fi + + setup_develop_no_requirements_update $project_dir + + # We've just gone and possibly modified the user's source tree in an + # automated way, which is considered bad form if it's a development + # tree because we've screwed up their next git checkin. So undo it. + # + # However... there are some circumstances, like running in the gate + # where we really really want the overridden version to stick. So provide + # a variable that tells us whether or not we should UNDO the requirements + # changes (this will be set to False in the OpenStack ci gate) + if [ $UNDO_REQUIREMENTS = "True" ]; then + if [ $update_requirements -eq 0 ]; then + (cd $project_dir && git reset --hard) + fi + fi +} + +# ``pip install -e`` the package, which processes the dependencies +# using pip before running `setup.py develop` +# Uses globals ``STACK_USER`` +# setup_develop_no_requirements_update directory +function setup_develop_no_requirements_update() { + local project_dir=$1 + + pip_install -e $project_dir + # ensure that further actions can do things like setup.py sdist + safe_chown -R $STACK_USER $1/*.egg-info +} + # Service Functions # ================= From 71ef61ac8727137da01b3ca970a70b3adc81fd51 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Wed, 19 Feb 2014 22:19:24 -0800 Subject: [PATCH 0176/4119] Add variable to configure the run of IPv6 Tests Related Tempest change: https://review.openstack.org/#/c/74933/ Closes-bug: 1282387 Change-Id: If9e9c5319c484dc4c00ed3bdcefc132410719b87 --- lib/tempest | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/tempest b/lib/tempest index 596750b32f..d2227feed9 100644 --- a/lib/tempest +++ b/lib/tempest @@ -63,6 +63,9 @@ TEMPEST_VOLUME_DRIVER=${TEMPEST_VOLUME_DRIVER:-default} TEMPEST_VOLUME_VENDOR=${TEMPEST_VOLUME_VENDOR:-"Open Source"} TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-iSCSI} +# 
Neutron/Network variables +IPV6_ENABLED=$(trueorfalse True $IPV6_ENABLED) + # Functions # --------- @@ -285,11 +288,13 @@ function configure_tempest() { # Compute admin iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED + # Network iniset $TEMPEST_CONFIG network api_version 2.0 iniset $TEMPEST_CONFIG network tenant_networks_reachable "$tenant_networks_reachable" iniset $TEMPEST_CONFIG network public_network_id "$public_network_id" iniset $TEMPEST_CONFIG network public_router_id "$public_router_id" iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE" + iniset $TEMPEST_CONFIG network ipv6_enabled "$IPV6_ENABLED" # boto iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" From 041fa712472d887550a540dd50ade546f847c6b4 Mon Sep 17 00:00:00 2001 From: David Kranz Date: Mon, 24 Feb 2014 13:30:59 -0500 Subject: [PATCH 0177/4119] Make admin_bind_host configurable The use case is running devstack inside an OpenStack vm and running tempest from some other machine. To make the catalog export urls that can be accessed from off the devstack machine, you need to set KEYSTONE_SERVICE_HOST to an external IP. But devstack uses that address in its setup of keystone in addition to exporting in the catalog. Because OpenStack has an issue where a vm cannot access itself through its own floating ip, devstack fails. There is no way to have this use case by providing an ip address. The workaround is to use the hostname of the devstack machine. That worked until recently when a change was made to set admin_bind_host to the value of KEYSTONE_SERVICE_HOST. The result is that port 35357 is only opened locally. This change allows the devstack user to restore the original behavior allowing this use case. 
Change-Id: I97b938b305b7dd878397e7e64462650064e59cd2 Closes-Bug: #1283803 --- lib/keystone | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index cebb4d3522..44ac94d802 100644 --- a/lib/keystone +++ b/lib/keystone @@ -70,6 +70,8 @@ KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001} KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +# Bind hosts +KEYSTONE_ADMIN_BIND_HOST=${KEYSTONE_ADMIN_BIND_HOST:-$KEYSTONE_SERVICE_HOST} # Set the tenant for service accounts in Keystone SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} @@ -178,7 +180,7 @@ function configure_keystone() { # Set the URL advertised in the ``versions`` structure returned by the '/' route iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(public_port)s/" iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(admin_port)s/" - iniset $KEYSTONE_CONF DEFAULT admin_bind_host "$KEYSTONE_SERVICE_HOST" + iniset $KEYSTONE_CONF DEFAULT admin_bind_host "$KEYSTONE_ADMIN_BIND_HOST" # Register SSL certificates if provided if is_ssl_enabled_service key; then From 80313b24404105fb68d1488d48e00574129ccd69 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Sun, 23 Feb 2014 09:55:01 -0500 Subject: [PATCH 0178/4119] match devstack-gate format support millisecond resolution and the | separator for ts vs. content. everything else in openstack is running at millisecond resolution, and some times it's actually useful to see that when debugging gate failures. 
Change-Id: I2227ab0b4965cd1a24b579bdf2ba8c1f9a432f70 --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index c153132485..eaccc76537 100755 --- a/stack.sh +++ b/stack.sh @@ -530,9 +530,9 @@ if [[ -n "$LOGFILE" ]]; then # Redirect stdout/stderr to tee to write the log file exec 1> >( awk ' { - cmd ="date +\"%Y-%m-%d %H:%M:%S \"" + cmd ="date +\"%Y-%m-%d %H:%M:%S.%3N | \"" cmd | getline now - close("date +\"%Y-%m-%d %H:%M:%S \"") + close("date +\"%Y-%m-%d %H:%M:%S.%3N | \"") sub(/^/, now) print fflush() From 4f1fee6eae300a3384900df06ebc857e95854eb0 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 24 Feb 2014 14:24:13 +1100 Subject: [PATCH 0179/4119] Fix missing libffi-devel for python-glanceclient python-glanceclient is failing to install on rhel6 with a dependency chain from pip as cryptography>=0.2.1 (from pyOpenSSL>=0.11->python-glanceclient==0.12.0.56.gb8a850c) cryptography requires libffi-devel to build. I'm not sure what changed, but remove it from "testonly" so it is always installed. However, RHEL6 includes this in the optional repo, so we enable this repo in the fixup script. 
Change-Id: I9da0e91b75f41578861ee9685b8c7e91dd12dae7 --- files/apts/glance | 2 +- files/rpms/glance | 2 +- tools/fixup_stuff.sh | 4 ++++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/files/apts/glance b/files/apts/glance index 22787bc5a2..6dc878e4de 100644 --- a/files/apts/glance +++ b/files/apts/glance @@ -1,5 +1,5 @@ gcc -libffi-dev # testonly +libffi-dev libmysqlclient-dev # testonly libpq-dev # testonly libssl-dev # testonly diff --git a/files/rpms/glance b/files/rpms/glance index 785ce25df5..25c5d3902b 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -1,5 +1,5 @@ gcc -libffi-devel # testonly +libffi-devel libxml2-devel # testonly libxslt-devel # testonly mysql-devel # testonly diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 47b0cd10cd..048024a325 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -98,6 +98,10 @@ if [[ $DISTRO =~ (rhel6) ]]; then sudo setenforce 0 fi + # make sure we have the "optional" repo enabled; it provides some + # packages like libffi-devel for example + sudo yum-config-manager --enable rhel-6-server-optional-rpms + # If the ``dbus`` package was installed by DevStack dependencies the # uuid may not be generated because the service was never started (PR#598200), # causing Nova to stop later on complaining that ``/var/lib/dbus/machine-id`` From 5f90fc06f5cd3138de112eddf1b04fe1db56d226 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 24 Feb 2014 15:40:42 +1100 Subject: [PATCH 0180/4119] Fix permissions for tempest.conf The current script uses 'sudo' to copy tempest.conf.sample and thus the .conf file gets owned by root. It then makes the permissions 644, meaning that when the 'stack' user does the iniset() calls, it doesn't have permisson on the .conf file. Since the dir has been chowned to the stack user, it seems safe to just copy the sample file in without sudo. In addition, I moved the $TEMPEST_CONFIG_DIR creation closer to the copy to make it clearer what's going on. 
Seems to be related to dc4dc7f03335e26ea3d86b6184f0475cc5f3d51b Fixes bug: #1284378 Change-Id: I103b4e90cbcfa693c9cef319f4135868a1b83de3 --- lib/tempest | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/lib/tempest b/lib/tempest index 596750b32f..83ce5d2e2a 100644 --- a/lib/tempest +++ b/lib/tempest @@ -87,11 +87,6 @@ function configure_tempest() { local boto_instance_type="m1.tiny" local ssh_connect_method="fixed" - if [[ ! -d $TEMPEST_CONFIG_DIR ]]; then - sudo mkdir -p $TEMPEST_CONFIG_DIR - fi - sudo chown $STACK_USER $TEMPEST_CONFIG_DIR - # TODO(afazekas): # sudo python setup.py deploy @@ -142,8 +137,12 @@ function configure_tempest() { # Create tempest.conf from tempest.conf.sample # copy every time, because the image UUIDS are going to change - sudo cp $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG - sudo chmod 644 $TEMPEST_CONFIG + if [[ ! -d $TEMPEST_CONFIG_DIR ]]; then + sudo mkdir -p $TEMPEST_CONFIG_DIR + fi + sudo chown $STACK_USER $TEMPEST_CONFIG_DIR + cp $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG + chmod 644 $TEMPEST_CONFIG password=${ADMIN_PASSWORD:-secrete} From 6857dbb2b4fb40a2ed3362ba46f7b130a85b2de1 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Tue, 25 Feb 2014 11:02:44 +0400 Subject: [PATCH 0181/4119] Savanna: use heat infra engine if available In Savanna we have two provisioning engines: * "savanna" that directly work with nova/neutron/glance/cinder/etc and we'd like to deprecate it early in Juno release cycle, but it's still useful due to the fact that it could work ok w/o Heat; * "heat" engine uses Heat for provisioning of cluster resources, it's currently under active development and we're ready to make it default for OpenStack installations with enabled Heat. 
Change-Id: I937337b3921e9e51768a118fb4b6bd95962622bd --- lib/savanna | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/savanna b/lib/savanna index 954f0e711e..9feff236bc 100644 --- a/lib/savanna +++ b/lib/savanna @@ -135,6 +135,12 @@ function configure_savanna() { iniset $SAVANNA_CONF_FILE DEFAULT use_floating_ips true fi + if is_service_enabled heat; then + iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine heat + else + iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine savanna + fi + iniset $SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG recreate_database savanna utf8 From 45917cc4d941a530d75a84fa4dff738fe87f928b Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 24 Feb 2014 16:09:14 -0500 Subject: [PATCH 0182/4119] xtrace less we are xtrace happy, however that's just generating bulk in log files that are mostly ignorable. For the basically bullet proof functions we should not xtrace. Change-Id: Iab4e6d270c1546e0db2a06395cefcdf7f7929c3c --- functions-common | 85 ++++++++++++++++++++++++++++++++++++++++-------- stack.sh | 1 + 2 files changed, 72 insertions(+), 14 deletions(-) diff --git a/functions-common b/functions-common index d92e39cd91..79003fcfaf 100644 --- a/functions-common +++ b/functions-common @@ -39,59 +39,76 @@ set +o xtrace # Append a new option in an ini file without replacing the old value # iniadd config-file section option value1 value2 value3 ... 
function iniadd() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 shift 3 local values="$(iniget_multiline $file $section $option) $@" iniset_multiline $file $section $option $values + $xtrace } # Comment an option in an INI file # inicomment config-file section option function inicomment() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" + $xtrace } # Get an option from an INI file # iniget config-file section option function iniget() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 local line line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") echo ${line#*=} + $xtrace } # Get a multiple line option from an INI file # iniget_multiline config-file section option function iniget_multiline() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 local values values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file") echo ${values} + $xtrace } # Determinate is the given option present in the INI file # ini_has_option config-file section option function ini_has_option() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 local line line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + $xtrace [ -n "$line" ] } # Set an option in an INI file # iniset config-file section option value function iniset() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 @@ -113,11 +130,14 @@ $option = $value # Replace it sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" fi + $xtrace } # Set a multiple line option in an INI file # iniset_multiline config-file 
section option value1 value2 valu3 ... function iniset_multiline() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 @@ -142,15 +162,19 @@ function iniset_multiline() { $option = $v " "$file" done + $xtrace } # Uncomment an option in an INI file # iniuncomment config-file section option function iniuncomment() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file" + $xtrace } # Normalize config values to True or False @@ -158,6 +182,8 @@ function iniuncomment() { # Accepts as True: 1 yes Yes YES true True TRUE # VAR=$(trueorfalse default-value test-value) function trueorfalse() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local default=$1 local testval=$2 @@ -165,6 +191,7 @@ function trueorfalse() { [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; } [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; } echo "$default" + $xtrace } @@ -675,9 +702,14 @@ function _get_package_dir() { # Uses globals ``OFFLINE``, ``*_proxy`` # apt_get operation package [package ...] function apt_get() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace + [[ "$OFFLINE" = "True" || -z "$@" ]] && return local sudo="sudo" [[ "$(id -u)" = "0" ]] && sudo="env" + + $xtrace $sudo DEBIAN_FRONTEND=noninteractive \ http_proxy=$http_proxy https_proxy=$https_proxy \ no_proxy=$no_proxy \ @@ -695,6 +727,8 @@ function apt_get() { # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection # of the package to the distros listed. The distro names are case insensitive. 
function get_packages() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local services=$@ local package_dir=$(_get_package_dir) local file_to_parse @@ -706,6 +740,7 @@ function get_packages() { fi if [[ -z "$DISTRO" ]]; then GetDistro + echo "Found Distro $DISTRO" fi for service in ${services//,/ }; do # Allow individual services to specify dependencies @@ -797,23 +832,30 @@ function get_packages() { done IFS=$OIFS done + $xtrace } # Distro-agnostic package installer # install_package package [package ...] function install_package() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace if is_ubuntu; then # if there are transient errors pulling the updates, that's fine. It may # be secondary repositories that we don't really care about. [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update || /bin/true NO_UPDATE_REPOS=True + $xtrace apt_get install "$@" elif is_fedora; then + $xtrace yum_install "$@" elif is_suse; then + $xtrace zypper_install "$@" else + $xtrace exit_distro_not_supported "installing packages" fi } @@ -1092,7 +1134,13 @@ function get_python_exec_prefix() { # ``TRACK_DEPENDS``, ``*_proxy`` # pip_install package [package ...] function pip_install { - [[ "$OFFLINE" = "True" || -z "$@" ]] && return + local xtrace=$(set +o | grep xtrace) + set +o xtrace + if [[ "$OFFLINE" = "True" || -z "$@" ]]; then + $xtrace + return + fi + if [[ -z "$os_PACKAGE" ]]; then GetOSVersion fi @@ -1121,6 +1169,7 @@ function pip_install { # this problem. See https://github.com/pypa/pip/issues/709 local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX) + $xtrace $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \ HTTP_PROXY=$http_proxy \ HTTPS_PROXY=$https_proxy \ @@ -1235,32 +1284,36 @@ function enable_service() { # Uses global ``ENABLED_SERVICES`` # is_service_enabled service [service ...] 
function is_service_enabled() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace + local enabled=1 services=$@ for service in ${services}; do - [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 + [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && enabled=0 # Look for top-level 'enabled' function for this service if type is_${service}_enabled >/dev/null 2>&1; then # A function exists for this service, use it is_${service}_enabled - return $? + enabled=$? fi # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled() # are implemented - [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0 - [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0 - [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0 - [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0 - [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 - [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && return 0 - [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 - [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0 - [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0 - [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0 + [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && enabled=0 + [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0 + [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && enabled=0 + [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && enabled=0 + [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && enabled=0 + [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && enabled=0 + [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && enabled=0 + [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && enabled=0 + [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && 
enabled=0 + [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && enabled=0 done - return 1 + $xtrace + return $enabled } # Toggle enable/disable_service for services that must run exclusive of each other @@ -1286,6 +1339,8 @@ function use_exclusive_service { # Only run the command if the target file (the last arg) is not on an # NFS filesystem. function _safe_permission_operation() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local args=( $@ ) local last local sudo_cmd @@ -1299,6 +1354,7 @@ function _safe_permission_operation() { fi if is_nfs_directory "$dir_to_check" ; then + $xtrace return 0 fi @@ -1308,6 +1364,7 @@ function _safe_permission_operation() { sudo_cmd="sudo" fi + $xtrace $sudo_cmd $@ } diff --git a/stack.sh b/stack.sh index ce19b8fc5c..0fdac3394a 100755 --- a/stack.sh +++ b/stack.sh @@ -529,6 +529,7 @@ if [[ -n "$LOGFILE" ]]; then if [[ "$VERBOSE" == "True" ]]; then # Redirect stdout/stderr to tee to write the log file exec 1> >( awk ' + /((set \+o$)|xtrace)/ { next } { cmd ="date +\"%Y-%m-%d %H:%M:%S \"" cmd | getline now From dd029da5b9b5600b8f6893247645db4fb0b95efe Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Mon, 24 Feb 2014 18:09:10 +0000 Subject: [PATCH 0183/4119] Create stack_domain_admin user Create an additional service user for Heat, which is a domain admin for the stack_user_domain - this is necessary since the normal service user cannot manage the projects/users in the stack_user_domain when keystone is configured to use the v3cloudsample policy (such as in gate integration tests). 
Change-Id: If59c11a74145b9bd02f78a7e0882afe1b0a72e40 --- lib/heat | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/heat b/lib/heat index af10fa6f1d..1b6dc86989 100644 --- a/lib/heat +++ b/lib/heat @@ -207,6 +207,16 @@ function create_heat_accounts() { --description "Owns users and projects created by heat" \ | grep ' id ' | get_field 2) iniset $HEAT_CONF DEFAULT stack_user_domain ${D_ID} + + openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \ + --os-identity-api-version=3 user create --password $SERVICE_PASSWORD \ + --domain $D_ID heat_domain_admin \ + --description "Manages users and projects created by heat" + openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \ + --os-identity-api-version=3 role add \ + --user ${U_ID} --domain ${D_ID} admin + iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin + iniset $HEAT_CONF DEFAULT stack_domain_admin_password $SERVICE_PASSWORD } # Restore xtrace From 78096b5073c70ef2c1f0626c802e095cd288c097 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 25 Feb 2014 10:23:04 -0500 Subject: [PATCH 0184/4119] remove sysstat & pidstat dstat is far cleaner for getting results out of the environment, and covers the bulk of our use cases for sysstat and pidstat with a much better ui. devstack is allowed to be opinionated, so become opinionated here. 
Change-Id: I21ec96339dcd704098512fdafd896738f352962d --- files/apts/sysstat | 1 - files/rpms-suse/sysstat | 1 - files/rpms/sysstat | 1 - stack.sh | 33 +--------------- tools/sar_filter.py | 86 ----------------------------------------- 5 files changed, 2 insertions(+), 120 deletions(-) delete mode 100644 files/apts/sysstat delete mode 100644 files/rpms-suse/sysstat delete mode 100644 files/rpms/sysstat delete mode 100755 tools/sar_filter.py diff --git a/files/apts/sysstat b/files/apts/sysstat deleted file mode 100644 index ea0c342d91..0000000000 --- a/files/apts/sysstat +++ /dev/null @@ -1 +0,0 @@ -sysstat diff --git a/files/rpms-suse/sysstat b/files/rpms-suse/sysstat deleted file mode 100644 index ea0c342d91..0000000000 --- a/files/rpms-suse/sysstat +++ /dev/null @@ -1 +0,0 @@ -sysstat diff --git a/files/rpms/sysstat b/files/rpms/sysstat deleted file mode 100644 index ea0c342d91..0000000000 --- a/files/rpms/sysstat +++ /dev/null @@ -1 +0,0 @@ -sysstat diff --git a/stack.sh b/stack.sh index 9f08e0f017..1d281587b1 100755 --- a/stack.sh +++ b/stack.sh @@ -294,15 +294,9 @@ SYSLOG=`trueorfalse False $SYSLOG` SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP} SYSLOG_PORT=${SYSLOG_PORT:-516} -# Enable sysstat logging -SYSSTAT_FILE=${SYSSTAT_FILE:-"sysstat.dat"} -SYSSTAT_INTERVAL=${SYSSTAT_INTERVAL:-"1"} - +# for DSTAT logging DSTAT_FILE=${DSTAT_FILE:-"dstat.txt"} -PIDSTAT_FILE=${PIDSTAT_FILE:-"pidstat.txt"} -PIDSTAT_INTERVAL=${PIDSTAT_INTERVAL:-"5"} - # Use color for logging output (only available if syslog is not used) LOG_COLOR=`trueorfalse True $LOG_COLOR` @@ -862,23 +856,9 @@ fi # Initialize the directory for service status check init_service_check - -# Sysstat and friends +# Dstat # ------- -# If enabled, systat has to start early to track OpenStack service startup. 
-# what we want to measure -# -u : cpu statitics -# -q : load -# -b : io load rates -# -w : process creation and context switch rates -SYSSTAT_OPTS="-u -q -b -w" -if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it sysstat "cd $TOP_DIR; ./tools/sar_filter.py $SYSSTAT_OPTS -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" -else - screen_it sysstat "./tools/sar_filter.py $SYSSTAT_OPTS $SYSSTAT_INTERVAL" -fi - # A better kind of sysstat, with the top process per time slice DSTAT_OPTS="-tcndylp --top-cpu-adv" if [[ -n ${SCREEN_LOGDIR} ]]; then @@ -887,15 +867,6 @@ else screen_it dstat "dstat $DSTAT_OPTS" fi -# Per-process stats -PIDSTAT_OPTS="-l -p ALL -T ALL" -if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it pidstat "cd $TOP_DIR; pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL > $SCREEN_LOGDIR/$PIDSTAT_FILE" -else - screen_it pidstat "pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL" -fi - - # Start Services # ============== diff --git a/tools/sar_filter.py b/tools/sar_filter.py deleted file mode 100755 index 24ef0e476c..0000000000 --- a/tools/sar_filter.py +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2014 Samsung Electronics Corp. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import re -import subprocess -import sys - - -def is_data_line(line): - timestamp, data = parse_line(line) - return re.search('\d\.d', data) - - -def parse_line(line): - m = re.search('(\d\d:\d\d:\d\d( \w\w)?)(\s+((\S+)\s*)+)', line) - if m: - date = m.group(1) - data = m.group(3).rstrip() - return date, data - else: - return None, None - - -process = subprocess.Popen( - "sar %s" % " ".join(sys.argv[1:]), - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - -# Poll process for new output until finished - -start_time = "" -header = "" -data_line = "" -printed_header = False -current_ts = None - -# print out the first sysstat line regardless -print process.stdout.readline() - -while True: - nextline = process.stdout.readline() - if nextline == '' and process.poll() is not None: - break - - date, data = parse_line(nextline) - # stop until we get to the first set of real lines - if not date: - continue - - # now we eat the header lines, and only print out the header - # if we've never seen them before - if not start_time: - start_time = date - header += "%s %s" % (date, data) - elif date == start_time: - header += " %s" % data - elif not printed_header: - printed_header = True - print header - - # now we know this is a data line, printing out if the timestamp - # has changed, and stacking up otherwise. - nextline = process.stdout.readline() - date, data = parse_line(nextline) - if date != current_ts: - current_ts = date - print data_line - data_line = "%s %s" % (date, data) - else: - data_line += " %s" % data - - sys.stdout.flush() From b8e250232ec55b946d2fd7e4237f12632408bdcc Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 21 Feb 2014 16:14:29 +1100 Subject: [PATCH 0185/4119] Add end-of-file checks to bash8 Add two end-of-file checks to bash8. Firstly, alert if heredoc hasn't finished. Some heredocs were done like: --- sudo bash -c "cat < foo ... 
EOF" --- (A better way to do this is "cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla + cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla + cat < $rules_dir/50-libvirt-$STACK_USER.rules + cat < Date: Wed, 26 Feb 2014 11:16:09 +1100 Subject: [PATCH 0186/4119] Run yum repolist commands as root Otherwise you get yum errors like [1] when you run stack.sh as !root. The solution is to run yum commands as root so it can access the right certs [1] https://access.redhat.com/site/solutions/312413 Change-Id: I54b0df13508c50aba67e23da11953c536933917a --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 9cdf2648f1..b3c507b600 100755 --- a/stack.sh +++ b/stack.sh @@ -181,7 +181,7 @@ if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then # Installing Open vSwitch on RHEL6 requires enabling the RDO repo. RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack-havana/rdo-release-havana.rpm"} RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-havana"} - if ! yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then + if ! sudo yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then echo "RDO repo not detected; installing" yum_install $RHEL6_RDO_REPO_RPM || \ die $LINENO "Error installing RDO repo, cannot continue" @@ -189,7 +189,7 @@ if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then # RHEL6 requires EPEL for many Open Stack dependencies RHEL6_EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"} - if ! yum repolist enabled epel | grep -q 'epel'; then + if ! 
sudo yum repolist enabled epel | grep -q 'epel'; then echo "EPEL not detected; installing" yum_install ${RHEL6_EPEL_RPM} || \ die $LINENO "Error installing EPEL repo, cannot continue" From 3e37326a3566ac38ea7ccf053fc183b7a8fccc08 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 26 Feb 2014 13:29:31 +1100 Subject: [PATCH 0187/4119] Move enablement of rhel6-optional repo earlier Change 4f1fee6eae300a3384900df06ebc857e95854eb0 added the RHEL6 optional repo in fixup_stuff.sh, but it turns out that doesn't get run until after the package prerequisites phase. Move this into stack.sh with the RDO repo setup. Change-Id: Iae0df85fa94c6c1b6f497dd29fda90d03b903a41 --- stack.sh | 4 ++++ tools/fixup_stuff.sh | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index 9cdf2648f1..217afbc2e3 100755 --- a/stack.sh +++ b/stack.sh @@ -194,6 +194,10 @@ if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then yum_install ${RHEL6_EPEL_RPM} || \ die $LINENO "Error installing EPEL repo, cannot continue" fi + + # ... 
and also optional to be enabled + sudo yum-config-manager --enable rhel-6-server-optional-rpms + fi diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 048024a325..47b0cd10cd 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -98,10 +98,6 @@ if [[ $DISTRO =~ (rhel6) ]]; then sudo setenforce 0 fi - # make sure we have the "optional" repo enabled; it provides some - # packages like libffi-devel for example - sudo yum-config-manager --enable rhel-6-server-optional-rpms - # If the ``dbus`` package was installed by DevStack dependencies the # uuid may not be generated because the service was never started (PR#598200), # causing Nova to stop later on complaining that ``/var/lib/dbus/machine-id`` From 201850120bec762347b80b22b5c60df43a262c6e Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Mon, 3 Feb 2014 12:14:08 -0500 Subject: [PATCH 0188/4119] Make python-guestfs NOPRIME & install for libvirt The libguestfs dependency tree includes a number of packages that we may not want or work everywhere, such as fuse. 
Now python-(lib)guestfs will install from lib/nova_plugins/hypervisor-libvirt Change-Id: I6c3a614010ee8d65813eec66a56680def622514c --- files/apts/n-cpu | 2 +- files/rpms/n-cpu | 2 +- lib/nova_plugins/hypervisor-libvirt | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/files/apts/n-cpu b/files/apts/n-cpu index b287107256..a82304dfe2 100644 --- a/files/apts/n-cpu +++ b/files/apts/n-cpu @@ -5,4 +5,4 @@ open-iscsi-utils # Deprecated since quantal dist:precise genisoimage sysfsutils sg3-utils -python-guestfs +python-guestfs # NOPRIME diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index e4fdaf4eda..32b1546c39 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -4,4 +4,4 @@ lvm2 genisoimage sysfsutils sg3_utils -python-libguestfs +python-libguestfs # NOPRIME diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 415244ffae..7f0880494e 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -140,10 +140,12 @@ function install_nova_hypervisor() { install_package kvm install_package libvirt-bin install_package python-libvirt + install_package python-guestfs elif is_fedora || is_suse; then install_package kvm install_package libvirt install_package libvirt-python + install_package python-libguestfs fi # Install and configure **LXC** if specified. LXC is another approach to From 06ba5193bebe27b2d7ead2d31ed9171885c6a5d8 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Wed, 26 Feb 2014 13:46:56 +1000 Subject: [PATCH 0189/4119] Insecure check if keystone is up If we start keystone with an SSL endpoint then the curl check to see if it is running will fail because it cannot create a secure connection. This check can be done insecurely as all we care about is that the service has started. 
Change-Id: I826753d4d46e9956f443110029346bc70282951a --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index cebb4d3522..73af1d356d 100644 --- a/lib/keystone +++ b/lib/keystone @@ -424,7 +424,7 @@ function start_keystone() { fi echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -k -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then die $LINENO "keystone did not start" fi From 3b57829ece7aa231770b640afd6da961dae2fc1c Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 26 Feb 2014 14:52:02 +1100 Subject: [PATCH 0190/4119] Don't use --skip-redirect for cinder restart on rhel6 RHEL6 doesn't support this flag so the restart fails. Not exactly sure why it is required, seems unchagned from the initial commit 67787e6b4c6f31388cbee6d83b67371b31c443d4 (found running stack.sh with -e per [1]) [1] https://review.openstack.org/#/c/71996/ Change-Id: Ib34c3663409d7b96b932286cb5a6974e940075d3 --- lib/cinder | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index c8c90c098d..e8f30b683c 100644 --- a/lib/cinder +++ b/lib/cinder @@ -496,8 +496,12 @@ function start_cinder() { sudo stop tgt || true sudo start tgt elif is_fedora; then - # bypass redirection to systemctl during restart - sudo /sbin/service --skip-redirect tgtd restart + if [[ $DISTRO =~ (rhel6) ]]; then + sudo /sbin/service tgtd restart + else + # bypass redirection to systemctl during restart + sudo /sbin/service --skip-redirect tgtd restart + fi elif is_suse; then restart_service tgtd else From 1755f689e807cd73b7bb2c67ac0531afbc8c6448 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 26 Feb 2014 13:08:00 -0600 
Subject: [PATCH 0191/4119] Fix heat role create error https://review.openstack.org/#/c/76036/ changed the user creat commands, missed the argument to --user Change-Id: Iaf10ef80a2fb0227dd66a314e7ec253dfb4dc4fe --- lib/heat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index 1b6dc86989..972c35ce72 100644 --- a/lib/heat +++ b/lib/heat @@ -214,7 +214,7 @@ function create_heat_accounts() { --description "Manages users and projects created by heat" openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \ --os-identity-api-version=3 role add \ - --user ${U_ID} --domain ${D_ID} admin + --user heat_domain_admin --domain ${D_ID} admin iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin iniset $HEAT_CONF DEFAULT stack_domain_admin_password $SERVICE_PASSWORD } From a25a6f6d80cb844f13540fecf616b289c42e3ebe Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 24 Feb 2014 16:03:41 -0600 Subject: [PATCH 0192/4119] Unbuffer log output * Force-flush log output so we don't lose log output in certain error cases. 
* Slow down exit paths: add sleep to die(), wait until last moment to kill child processes (including the awk log output filter) Change-Id: I1620fd33b89b237d9c2bb6206f3de2c81719f676 --- functions-common | 2 ++ stack.sh | 26 +++++++++++++++----------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/functions-common b/functions-common index 79003fcfaf..4bc3bbaac5 100644 --- a/functions-common +++ b/functions-common @@ -222,6 +222,8 @@ function die() { fi backtrace 2 err $line "$*" + # Give buffers a second to flush + sleep 1 exit $exitcode } diff --git a/stack.sh b/stack.sh index 22a418f306..c95199769f 100755 --- a/stack.sh +++ b/stack.sh @@ -522,7 +522,7 @@ if [[ -n "$LOGFILE" ]]; then exec 3>&1 if [[ "$VERBOSE" == "True" ]]; then # Redirect stdout/stderr to tee to write the log file - exec 1> >( awk ' + exec 1> >( awk -v logfile=${LOGFILE} ' /((set \+o$)|xtrace)/ { next } { cmd ="date +\"%Y-%m-%d %H:%M:%S.%3N | \"" @@ -530,8 +530,9 @@ if [[ -n "$LOGFILE" ]]; then close("date +\"%Y-%m-%d %H:%M:%S.%3N | \"") sub(/^/, now) print - fflush() - }' | tee "${LOGFILE}" ) 2>&1 + print > logfile + fflush("") + }' ) 2>&1 # Set up a second fd for output exec 6> >( tee "${SUMFILE}" ) else @@ -579,21 +580,24 @@ fi # ----------------------- # Kill background processes on exit -trap clean EXIT -clean() { +trap exit_trap EXIT +function exit_trap { local r=$? - kill >/dev/null 2>&1 $(jobs -p) + echo "exit_trap called, cleaning up child processes" + kill 2>&1 $(jobs -p) exit $r } - # Exit on any errors so that errors don't compound -trap failed ERR -failed() { +trap err_trap ERR +function err_trap { local r=$? 
- kill >/dev/null 2>&1 $(jobs -p) set +o xtrace - [ -n "$LOGFILE" ] && echo "${0##*/} failed: full log in $LOGFILE" + if [[ -n "$LOGFILE" ]]; then + echo "${0##*/} failed: full log in $LOGFILE" + else + echo "${0##*/} failed" + fi exit $r } From 09bd7c8fd5a662ef697eb61638efbe862a4875a6 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 3 Feb 2014 08:35:26 +0900 Subject: [PATCH 0193/4119] enable -o errexit devstack should run under -o errexit to ensure that we fail early when something has gone wrong, otherwise determination of the root failure location is often quite challenging. this clears all the normal use cases for devstack, there could be tests which now die early, which we're going to have to check for later. Change-Id: Ibd828c4f4fd95a60d3918d3d7ae90e10649479ab --- functions-common | 3 ++- stack.sh | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 4bc3bbaac5..3e29e8c7de 100644 --- a/functions-common +++ b/functions-common @@ -1094,7 +1094,8 @@ function service_check() { fi # Check if there is any falure flag file under $SERVICE_DIR/$SCREEN_NAME - failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null` + # make this -o errexit safe + failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null || /bin/true` for service in $failures; do service=`basename $service` diff --git a/stack.sh b/stack.sh index c95199769f..d43a73a889 100755 --- a/stack.sh +++ b/stack.sh @@ -601,6 +601,9 @@ function err_trap { exit $r } + +set -o errexit + # Print the commands being run so that we can see the command that triggers # an error. It is also useful for following along as the install occurs. 
set -o xtrace From a42650fb7e4d3fc8853f04d84109199fa1d9f5e4 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 27 Feb 2014 13:08:30 +0100 Subject: [PATCH 0194/4119] Fix libvirt polkit settings After the https://review.openstack.org/#/c/75314 merged the /etc/polkit-1/rules.d/50-libvirt-stack.rules files contains subject.user == '"stack"' instead of subject.user == 'stack'. Change-Id: I09f252b2d0e53f012facb9f7eaa21c1e1bdf492b --- lib/nova_plugins/hypervisor-libvirt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index a550600363..dc999edfe9 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -87,7 +87,7 @@ EOF cat < Date: Thu, 27 Feb 2014 11:13:36 -0600 Subject: [PATCH 0195/4119] Fix exit_trap() error if no child processes Bug-Id: 1285776 Change-Id: Iad7a9f2c03cc39159beda55345f232cefed10520 --- stack.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 4333fb2c7e..f4342dd206 100755 --- a/stack.sh +++ b/stack.sh @@ -587,8 +587,11 @@ fi trap exit_trap EXIT function exit_trap { local r=$? - echo "exit_trap called, cleaning up child processes" - kill 2>&1 $(jobs -p) + jobs=$(jobs -p) + if [[ -n $jobs ]]; then + echo "exit_trap: cleaning up child processes" + kill 2>&1 $jobs + fi exit $r } From 83b6c99b503dced1e92761e1de8ceaf23a396453 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 27 Feb 2014 12:41:28 -0600 Subject: [PATCH 0196/4119] Handle non-zero exit code from git diff The check for a changed repo in setup_develop() 'git diff --exit-code' returns a status of 1 when the repo has changes; trap that so errexit does not abort the script. 
Bug-Id: 1285780 Change-Id: Ic97e68348f46245b271567893b447fcedbd7bd6e --- functions-common | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/functions-common b/functions-common index 8e6b2b1895..0d85068a2f 100644 --- a/functions-common +++ b/functions-common @@ -1223,14 +1223,12 @@ function pip_install { function setup_develop() { local project_dir=$1 - echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" - # Don't update repo if local changes exist # Don't use buggy "git diff --quiet" - (cd $project_dir && git diff --exit-code >/dev/null) - local update_requirements=$? + # ``errexit`` requires us to trap the exit code when the repo is changed + local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed") - if [ $update_requirements -eq 0 ]; then + if [[ $update_requirements = "changed" ]]; then (cd $REQUIREMENTS_DIR; \ $SUDO_CMD python update.py $project_dir) fi @@ -1246,7 +1244,7 @@ function setup_develop() { # a variable that tells us whether or not we should UNDO the requirements # changes (this will be set to False in the OpenStack ci gate) if [ $UNDO_REQUIREMENTS = "True" ]; then - if [ $update_requirements -eq 0 ]; then + if [[ $update_requirements = "changed" ]]; then (cd $project_dir && git reset --hard) fi fi From 657ce7fa213b680904c07f09029467d8a195761d Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Thu, 27 Feb 2014 10:50:38 -0800 Subject: [PATCH 0197/4119] Stop trying to create the 'ironic' user twice After 09bd7c8fd5a6 landed, a conflict between lib/ironic and extras.d/50-ironic.sh was exposed, breaking Ironic's check and gate tests. This resolves that conflict by only creating the 'ironic' user once. 
Change-Id: Ic41517f0977c84a82f92f58565aaee6b5cc7eb3e --- lib/ironic | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/ironic b/lib/ironic index 607b13125a..f4454749dc 100644 --- a/lib/ironic +++ b/lib/ironic @@ -186,9 +186,6 @@ function init_ironic() { $IRONIC_BIN_DIR/ironic-dbsync create_ironic_cache_dir - - # Create keystone artifacts for Ironic. - create_ironic_accounts } # start_ironic() - Start running processes, including screen From aee18c749b0e3a1a3a6907a33db76ae83b8d41d9 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 21 Feb 2014 15:35:08 +1100 Subject: [PATCH 0198/4119] Enforce function declaration format in bash8 Check that function calls look like ^function foo {$ in bash8, and fix all existing failures of that check. Add a note to HACKING.rst Change-Id: Ic19eecb39e0b20273d1bcd551a42fe400d54e938 --- HACKING.rst | 2 + driver_certs/cinder_driver_cert.sh | 2 +- exercises/aggregates.sh | 2 +- exercises/client-args.sh | 2 +- exercises/client-env.sh | 2 +- exercises/neutron-adv-test.sh | 6 +- functions | 22 ++-- functions-common | 122 ++++++++++---------- lib/apache | 14 +-- lib/baremetal | 22 ++-- lib/ceilometer | 18 +-- lib/cinder | 28 ++--- lib/cinder_plugins/XenAPINFS | 2 +- lib/cinder_plugins/glusterfs | 2 +- lib/cinder_plugins/nfs | 2 +- lib/cinder_plugins/sheepdog | 2 +- lib/cinder_plugins/solidfire | 2 +- lib/cinder_plugins/vsphere | 2 +- lib/config | 8 +- lib/gantt | 14 +-- lib/glance | 16 +-- lib/heat | 18 +-- lib/horizon | 14 +-- lib/infra | 4 +- lib/ironic | 26 ++--- lib/keystone | 22 ++-- lib/ldap | 14 +-- lib/marconi | 20 ++-- lib/neutron | 85 +++++++------- lib/neutron_plugins/bigswitch_floodlight | 22 ++-- lib/neutron_plugins/brocade | 22 ++-- lib/neutron_plugins/cisco | 42 +++---- lib/neutron_plugins/embrane | 6 +- lib/neutron_plugins/linuxbridge | 6 +- lib/neutron_plugins/linuxbridge_agent | 18 +-- lib/neutron_plugins/midonet | 24 ++-- lib/neutron_plugins/ml2 | 8 +- lib/neutron_plugins/nec | 26 ++--- 
lib/neutron_plugins/openvswitch | 6 +- lib/neutron_plugins/openvswitch_agent | 16 +-- lib/neutron_plugins/ovs_base | 16 +-- lib/neutron_plugins/plumgrid | 16 +-- lib/neutron_plugins/ryu | 22 ++-- lib/neutron_plugins/services/firewall | 6 +- lib/neutron_plugins/services/loadbalancer | 8 +- lib/neutron_plugins/services/metering | 6 +- lib/neutron_plugins/services/vpn | 6 +- lib/neutron_plugins/vmware_nsx | 26 ++--- lib/neutron_thirdparty/bigswitch_floodlight | 12 +- lib/neutron_thirdparty/midonet | 12 +- lib/neutron_thirdparty/ryu | 12 +- lib/neutron_thirdparty/trema | 16 +-- lib/neutron_thirdparty/vmware_nsx | 12 +- lib/nova | 34 +++--- lib/nova_plugins/hypervisor-baremetal | 10 +- lib/nova_plugins/hypervisor-docker | 10 +- lib/nova_plugins/hypervisor-fake | 10 +- lib/nova_plugins/hypervisor-libvirt | 10 +- lib/nova_plugins/hypervisor-openvz | 10 +- lib/nova_plugins/hypervisor-vsphere | 10 +- lib/nova_plugins/hypervisor-xenserver | 10 +- lib/oslo | 4 +- lib/rpc_backend | 10 +- lib/savanna | 12 +- lib/savanna-dashboard | 8 +- lib/stackforge | 4 +- lib/swift | 24 ++-- lib/tempest | 6 +- lib/template | 12 +- lib/tls | 20 ++-- lib/trove | 20 ++-- stack.sh | 6 +- tests/functions.sh | 6 +- tests/test_config.sh | 6 +- tools/bash8.py | 16 +++ tools/build_pxe_env.sh | 2 +- tools/build_ramdisk.sh | 4 +- tools/build_uec_ramdisk.sh | 2 +- tools/build_usb_boot.sh | 2 +- tools/copy_dev_environment_to_uec.sh | 2 +- tools/create_userrc.sh | 11 +- tools/fixup_stuff.sh | 2 +- tools/get_uec_image.sh | 4 +- tools/info.sh | 2 +- tools/install_openvpn.sh | 8 +- tools/install_pip.sh | 6 +- tools/jenkins/build_configuration.sh | 2 +- tools/jenkins/configurations/kvm.sh | 2 +- tools/jenkins/configurations/xs.sh | 2 +- tools/jenkins/run_test.sh | 2 +- tools/warm_apts_for_uec.sh | 2 +- tools/xen/build_xva.sh | 4 +- tools/xen/install_os_domU.sh | 6 +- tools/xen/prepare_guest.sh | 2 +- 94 files changed, 601 insertions(+), 585 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index 
103b579621..5c15537915 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -275,3 +275,5 @@ Variables and Functions - local variables should be lower case, global variables should be upper case - function names should_have_underscores, NotCamelCase. +- functions should be declared as per the regex ^function foo {$ + with code starting on the next line diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh index e45b7f8736..d2c636f89d 100755 --- a/driver_certs/cinder_driver_cert.sh +++ b/driver_certs/cinder_driver_cert.sh @@ -32,7 +32,7 @@ source $TOP_DIR/lib/cinder TEMPFILE=`mktemp` RECLONE=True -function log_message() { +function log_message { MESSAGE=$1 STEP_HEADER=$2 if [[ "$STEP_HEADER" = "True" ]]; then diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index d223301f35..01d548d1f2 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -57,7 +57,7 @@ AGGREGATE_NAME=test_aggregate_$RANDOM AGGREGATE2_NAME=test_aggregate_$RANDOM AGGREGATE_A_ZONE=nova -exit_if_aggregate_present() { +function exit_if_aggregate_present { aggregate_name=$1 if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then diff --git a/exercises/client-args.sh b/exercises/client-args.sh index e79774f98c..b360f1e86a 100755 --- a/exercises/client-args.sh +++ b/exercises/client-args.sh @@ -154,7 +154,7 @@ set +o xtrace # Results # ======= -function report() { +function report { if [[ -n "$2" ]]; then echo "$1: $2" fi diff --git a/exercises/client-env.sh b/exercises/client-env.sh index 6c6fe12282..d955e4d1e1 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -165,7 +165,7 @@ set +o xtrace # Results # ======= -function report() { +function report { if [[ -n "$2" ]]; then echo "$1: $2" fi diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index a9199e62a6..0a24fe9df7 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -20,7 +20,7 @@ echo 
"*********************************************************************" set -o errtrace trap failed ERR -failed() { +function failed { local r=$? set +o errtrace set +o xtrace @@ -395,7 +395,7 @@ function test_functions { # Usage and main # -------------- -usage() { +function usage { echo "$0: [-h]" echo " -h, --help Display help message" echo " -t, --tenant Create tenants" @@ -408,7 +408,7 @@ usage() { echo " -T, --test Test functions" } -main() { +function main { echo Description diff --git a/functions b/functions index 3101111c63..43639c79fb 100644 --- a/functions +++ b/functions @@ -51,7 +51,7 @@ function cleanup_tmp { # - ``GLANCE_HOSTPORT`` # # upload_image image-url glance-token -function upload_image() { +function upload_image { local image_url=$1 local token=$2 @@ -341,7 +341,7 @@ function use_database { # Wait for an HTTP server to start answering requests # wait_for_service timeout url -function wait_for_service() { +function wait_for_service { local timeout=$1 local url=$2 timeout $timeout sh -c "while ! 
curl --noproxy '*' -s $url >/dev/null; do sleep 1; done" @@ -351,7 +351,7 @@ function wait_for_service() { # ping check # Uses globals ``ENABLED_SERVICES`` # ping_check from-net ip boot-timeout expected -function ping_check() { +function ping_check { if is_service_enabled neutron; then _ping_check_neutron "$1" $2 $3 $4 return @@ -361,7 +361,7 @@ function ping_check() { # ping check for nova # Uses globals ``MULTI_HOST``, ``PRIVATE_NETWORK`` -function _ping_check_novanet() { +function _ping_check_novanet { local from_net=$1 local ip=$2 local boot_timeout=$3 @@ -386,7 +386,7 @@ function _ping_check_novanet() { } # Get ip of instance -function get_instance_ip(){ +function get_instance_ip { local vm_id=$1 local network_name=$2 local nova_result="$(nova show $vm_id)" @@ -401,7 +401,7 @@ function get_instance_ip(){ # ssh check # ssh_check net-name key-file floating-ip default-user active-timeout -function ssh_check() { +function ssh_check { if is_service_enabled neutron; then _ssh_check_neutron "$1" $2 $3 $4 $5 return @@ -409,7 +409,7 @@ function ssh_check() { _ssh_check_novanet "$1" $2 $3 $4 $5 } -function _ssh_check_novanet() { +function _ssh_check_novanet { local NET_NAME=$1 local KEY_FILE=$2 local FLOATING_IP=$3 @@ -425,7 +425,7 @@ function _ssh_check_novanet() { # Get the location of the $module-rootwrap executables, where module is cinder # or nova. # get_rootwrap_location module -function get_rootwrap_location() { +function get_rootwrap_location { local module=$1 echo "$(get_python_exec_prefix)/$module-rootwrap" @@ -434,7 +434,7 @@ function get_rootwrap_location() { # Path permissions sanity check # check_path_perm_sanity path -function check_path_perm_sanity() { +function check_path_perm_sanity { # Ensure no element of the path has 0700 permissions, which is very # likely to cause issues for daemons. 
Inspired by default 0700 # homedir permissions on RHEL and common practice of making DEST in @@ -505,7 +505,7 @@ function _vercmp_r { # The above will return "0", as the versions are equal. # # vercmp_numbers ver1 ver2 -vercmp_numbers() { +function vercmp_numbers { typeset v1=$1 v2=$2 sep typeset -a ver1 ver2 @@ -523,7 +523,7 @@ vercmp_numbers() { # Defaults are respectively 'project_name' and 'user_name' # # setup_colorized_logging something.conf SOMESECTION -function setup_colorized_logging() { +function setup_colorized_logging { local conf_file=$1 local conf_section=$2 local project_var=${3:-"project_name"} diff --git a/functions-common b/functions-common index 2248fbb610..eba4985e40 100644 --- a/functions-common +++ b/functions-common @@ -38,7 +38,7 @@ set +o xtrace # Append a new option in an ini file without replacing the old value # iniadd config-file section option value1 value2 value3 ... -function iniadd() { +function iniadd { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -52,7 +52,7 @@ function iniadd() { # Comment an option in an INI file # inicomment config-file section option -function inicomment() { +function inicomment { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -64,7 +64,7 @@ function inicomment() { # Get an option from an INI file # iniget config-file section option -function iniget() { +function iniget { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -78,7 +78,7 @@ function iniget() { # Get a multiple line option from an INI file # iniget_multiline config-file section option -function iniget_multiline() { +function iniget_multiline { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -92,7 +92,7 @@ function iniget_multiline() { # Determinate is the given option present in the INI file # ini_has_option config-file section option -function ini_has_option() { +function ini_has_option { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -106,7 
+106,7 @@ function ini_has_option() { # Set an option in an INI file # iniset config-file section option value -function iniset() { +function iniset { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -135,7 +135,7 @@ $option = $value # Set a multiple line option in an INI file # iniset_multiline config-file section option value1 value2 valu3 ... -function iniset_multiline() { +function iniset_multiline { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -167,7 +167,7 @@ $option = $v # Uncomment an option in an INI file # iniuncomment config-file section option -function iniuncomment() { +function iniuncomment { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -181,7 +181,7 @@ function iniuncomment() { # Accepts as False: 0 no No NO false False FALSE # Accepts as True: 1 yes Yes YES true True TRUE # VAR=$(trueorfalse default-value test-value) -function trueorfalse() { +function trueorfalse { local xtrace=$(set +o | grep xtrace) set +o xtrace local default=$1 @@ -213,7 +213,7 @@ function backtrace { # Prints line number and "message" then exits # die $LINENO "message" -function die() { +function die { local exitcode=$? set +o xtrace local line=$1; shift @@ -231,7 +231,7 @@ function die() { # exit code is non-zero and prints "message" and exits # NOTE: env-var is the variable name without a '$' # die_if_not_set $LINENO env-var "message" -function die_if_not_set() { +function die_if_not_set { local exitcode=$? FXTRACE=$(set +o | grep xtrace) set +o xtrace @@ -245,7 +245,7 @@ function die_if_not_set() { # Prints line number and "message" in error format # err $LINENO "message" -function err() { +function err { local exitcode=$? 
errXTRACE=$(set +o | grep xtrace) set +o xtrace @@ -262,7 +262,7 @@ function err() { # exit code is non-zero and prints "message" # NOTE: env-var is the variable name without a '$' # err_if_not_set $LINENO env-var "message" -function err_if_not_set() { +function err_if_not_set { local exitcode=$? errinsXTRACE=$(set +o | grep xtrace) set +o xtrace @@ -291,14 +291,14 @@ function exit_distro_not_supported { # Test if the named environment variable is set and not zero length # is_set env-var -function is_set() { +function is_set { local var=\$"$1" eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depends on this } # Prints line number and "message" in warning format # warn $LINENO "message" -function warn() { +function warn { local exitcode=$? errXTRACE=$(set +o | grep xtrace) set +o xtrace @@ -324,7 +324,7 @@ function warn() { # os_PACKAGE - package type # os_CODENAME - vendor's codename for release # GetOSVersion -GetOSVersion() { +function GetOSVersion { # Figure out which vendor we are if [[ -x "`which sw_vers 2>/dev/null`" ]]; then # OS/X @@ -414,7 +414,7 @@ GetOSVersion() { # Translate the OS version values into common nomenclature # Sets global ``DISTRO`` from the ``os_*`` values -function GetDistro() { +function GetDistro { GetOSVersion if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then # 'Everyone' refers to Ubuntu / Debian releases by the code name adjective @@ -491,7 +491,7 @@ function is_ubuntu { # Returns openstack release name for a given branch name # ``get_release_name_from_branch branch-name`` -function get_release_name_from_branch(){ +function get_release_name_from_branch { local branch=$1 if [[ $branch =~ "stable/" ]]; then echo ${branch#*/} @@ -577,7 +577,7 @@ function git_clone { # to timeout(1); otherwise the default value of 0 maintains the status # quo of waiting forever. 
# usage: git_timed -function git_timed() { +function git_timed { local count=0 local timeout=0 @@ -603,7 +603,7 @@ function git_timed() { # git update using reference as a branch. # git_update_branch ref -function git_update_branch() { +function git_update_branch { GIT_BRANCH=$1 @@ -615,7 +615,7 @@ function git_update_branch() { # git update using reference as a branch. # git_update_remote_branch ref -function git_update_remote_branch() { +function git_update_remote_branch { GIT_BRANCH=$1 @@ -625,7 +625,7 @@ function git_update_remote_branch() { # git update using reference as a tag. Be careful editing source at that repo # as working copy will be in a detached mode # git_update_tag ref -function git_update_tag() { +function git_update_tag { GIT_TAG=$1 @@ -641,7 +641,7 @@ function git_update_tag() { # Get the default value for HOST_IP # get_default_host_ip fixed_range floating_range host_ip_iface host_ip -function get_default_host_ip() { +function get_default_host_ip { local fixed_range=$1 local floating_range=$2 local host_ip_iface=$3 @@ -673,7 +673,7 @@ function get_default_host_ip() { # Fields are numbered starting with 1 # Reverse syntax is supported: -1 is the last field, -2 is second to last, etc. 
# get_field field-number -function get_field() { +function get_field { while read data; do if [ "$1" -lt 0 ]; then field="(\$(NF$1))" @@ -687,7 +687,7 @@ function get_field() { # Add a policy to a policy.json file # Do nothing if the policy already exists # ``policy_add policy_file policy_name policy_permissions`` -function policy_add() { +function policy_add { local policy_file=$1 local policy_name=$2 local policy_perm=$3 @@ -717,7 +717,7 @@ function policy_add() { # ================= # _get_package_dir -function _get_package_dir() { +function _get_package_dir { local pkg_dir if is_ubuntu; then pkg_dir=$FILES/apts @@ -734,7 +734,7 @@ function _get_package_dir() { # Wrapper for ``apt-get`` to set cache and proxy environment variables # Uses globals ``OFFLINE``, ``*_proxy`` # apt_get operation package [package ...] -function apt_get() { +function apt_get { local xtrace=$(set +o | grep xtrace) set +o xtrace @@ -759,7 +759,7 @@ function apt_get() { # - ``# NOPRIME`` defers installation to be performed later in `stack.sh` # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection # of the package to the distros listed. The distro names are case insensitive. -function get_packages() { +function get_packages { local xtrace=$(set +o | grep xtrace) set +o xtrace local services=$@ @@ -870,7 +870,7 @@ function get_packages() { # Distro-agnostic package installer # install_package package [package ...] -function install_package() { +function install_package { local xtrace=$(set +o | grep xtrace) set +o xtrace if is_ubuntu; then @@ -895,7 +895,7 @@ function install_package() { # Distro-agnostic function to tell if a package is installed # is_package_installed package [package ...] -function is_package_installed() { +function is_package_installed { if [[ -z "$@" ]]; then return 1 fi @@ -915,7 +915,7 @@ function is_package_installed() { # Distro-agnostic package uninstaller # uninstall_package package [package ...] 
-function uninstall_package() { +function uninstall_package { if is_ubuntu; then apt_get purge "$@" elif is_fedora; then @@ -930,7 +930,7 @@ function uninstall_package() { # Wrapper for ``yum`` to set proxy environment variables # Uses globals ``OFFLINE``, ``*_proxy`` # yum_install package [package ...] -function yum_install() { +function yum_install { [[ "$OFFLINE" = "True" ]] && return local sudo="sudo" [[ "$(id -u)" = "0" ]] && sudo="env" @@ -941,7 +941,7 @@ function yum_install() { # zypper wrapper to set arguments correctly # zypper_install package [package ...] -function zypper_install() { +function zypper_install { [[ "$OFFLINE" = "True" ]] && return local sudo="sudo" [[ "$(id -u)" = "0" ]] && sudo="env" @@ -958,7 +958,7 @@ function zypper_install() { # files to produce the same logs as screen_it(). The log filename is derived # from the service name and global-and-now-misnamed SCREEN_LOGDIR # _run_process service "command-line" -function _run_process() { +function _run_process { local service=$1 local command="$2" @@ -983,7 +983,7 @@ function _run_process() { # Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``. # This is used for ``service_check`` when all the ``screen_it`` are called finished # init_service_check -function init_service_check() { +function init_service_check { SCREEN_NAME=${SCREEN_NAME:-stack} SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} @@ -996,7 +996,7 @@ function init_service_check() { # Find out if a process exists by partial name. # is_running name -function is_running() { +function is_running { local name=$1 ps auxw | grep -v grep | grep ${name} > /dev/null RC=$? @@ -1009,7 +1009,7 @@ function is_running() { # of screen_it() without screen. 
PIDs are written to # $SERVICE_DIR/$SCREEN_NAME/$service.pid # run_process service "command-line" -function run_process() { +function run_process { local service=$1 local command="$2" @@ -1092,7 +1092,7 @@ function screen_rc { # If screen is being used kill the screen window; this will catch processes # that did not leave a PID behind # screen_stop service -function screen_stop() { +function screen_stop { SCREEN_NAME=${SCREEN_NAME:-stack} SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} USE_SCREEN=$(trueorfalse True $USE_SCREEN) @@ -1112,7 +1112,7 @@ function screen_stop() { # Helper to get the status of each running service # service_check -function service_check() { +function service_check { local service local failures SCREEN_NAME=${SCREEN_NAME:-stack} @@ -1145,7 +1145,7 @@ function service_check() { # Get the path to the pip command. # get_pip_command -function get_pip_command() { +function get_pip_command { which pip || which pip-python if [ $? -ne 0 ]; then @@ -1155,7 +1155,7 @@ function get_pip_command() { # Get the path to the direcotry where python executables are installed. # get_python_exec_prefix -function get_python_exec_prefix() { +function get_python_exec_prefix { if is_fedora || is_suse; then echo "/usr/bin" else @@ -1221,7 +1221,7 @@ function pip_install { # # Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS`` # setup_develop directory -function setup_develop() { +function setup_develop { local project_dir=$1 echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" @@ -1257,7 +1257,7 @@ function setup_develop() { # using pip before running `setup.py develop` # Uses globals ``STACK_USER`` # setup_develop_no_requirements_update directory -function setup_develop_no_requirements_update() { +function setup_develop_no_requirements_update { local project_dir=$1 pip_install -e $project_dir @@ -1271,7 +1271,7 @@ function setup_develop_no_requirements_update() { # remove extra commas from the input string (i.e. 
``ENABLED_SERVICES``) # _cleanup_service_list service-list -function _cleanup_service_list () { +function _cleanup_service_list { echo "$1" | sed -e ' s/,,/,/g; s/^,//; @@ -1284,7 +1284,7 @@ function _cleanup_service_list () { # before a minimal installation # Uses global ``ENABLED_SERVICES`` # disable_all_services -function disable_all_services() { +function disable_all_services { ENABLED_SERVICES="" } @@ -1293,7 +1293,7 @@ function disable_all_services() { # ENABLED_SERVICES+=",-rabbit" # Uses global ``ENABLED_SERVICES`` # disable_negated_services -function disable_negated_services() { +function disable_negated_services { local tmpsvcs="${ENABLED_SERVICES}" local service for service in ${tmpsvcs//,/ }; do @@ -1314,7 +1314,7 @@ function disable_negated_services() { # for nova, glance, and neutron built into is_service_enabled(). # Uses global ``ENABLED_SERVICES`` # disable_service service [service ...] -function disable_service() { +function disable_service { local tmpsvcs=",${ENABLED_SERVICES}," local service for service in $@; do @@ -1335,7 +1335,7 @@ function disable_service() { # for nova, glance, and neutron built into is_service_enabled(). # Uses global ``ENABLED_SERVICES`` # enable_service service [service ...] -function enable_service() { +function enable_service { local tmpsvcs="${ENABLED_SERVICES}" for service in $@; do if ! is_service_enabled $service; then @@ -1369,7 +1369,7 @@ function enable_service() { # # Uses global ``ENABLED_SERVICES`` # is_service_enabled service [service ...] -function is_service_enabled() { +function is_service_enabled { local xtrace=$(set +o | grep xtrace) set +o xtrace local enabled=1 @@ -1424,7 +1424,7 @@ function use_exclusive_service { # Only run the command if the target file (the last arg) is not on an # NFS filesystem. 
-function _safe_permission_operation() { +function _safe_permission_operation { local xtrace=$(set +o | grep xtrace) set +o xtrace local args=( $@ ) @@ -1457,7 +1457,7 @@ function _safe_permission_operation() { # Exit 0 if address is in network or 1 if address is not in network # ip-range is in CIDR notation: 1.2.3.4/20 # address_in_net ip-address ip-range -function address_in_net() { +function address_in_net { local ip=$1 local range=$2 local masklen=${range#*/} @@ -1468,7 +1468,7 @@ function address_in_net() { # Add a user to a group. # add_user_to_group user group -function add_user_to_group() { +function add_user_to_group { local user=$1 local group=$2 @@ -1486,7 +1486,7 @@ function add_user_to_group() { # Convert CIDR notation to a IPv4 netmask # cidr2netmask cidr-bits -function cidr2netmask() { +function cidr2netmask { local maskpat="255 255 255 255" local maskdgt="254 252 248 240 224 192 128" set -- ${maskpat:0:$(( ($1 / 8) * 4 ))}${maskdgt:$(( (7 - ($1 % 8)) * 4 )):3} @@ -1509,7 +1509,7 @@ function cp_it { # # http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh -function export_proxy_variables() { +function export_proxy_variables { if [[ -n "$http_proxy" ]]; then export http_proxy=$http_proxy fi @@ -1522,7 +1522,7 @@ function export_proxy_variables() { } # Returns true if the directory is on a filesystem mounted via NFS. 
-function is_nfs_directory() { +function is_nfs_directory { local mount_type=`stat -f -L -c %T $1` test "$mount_type" == "nfs" } @@ -1530,7 +1530,7 @@ function is_nfs_directory() { # Return the network portion of the given IP address using netmask # netmask is in the traditional dotted-quad format # maskip ip-address netmask -function maskip() { +function maskip { local ip=$1 local mask=$2 local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}" @@ -1540,7 +1540,7 @@ function maskip() { # Service wrapper to restart services # restart_service service-name -function restart_service() { +function restart_service { if is_ubuntu; then sudo /usr/sbin/service $1 restart else @@ -1550,19 +1550,19 @@ function restart_service() { # Only change permissions of a file or directory if it is not on an # NFS filesystem. -function safe_chmod() { +function safe_chmod { _safe_permission_operation chmod $@ } # Only change ownership of a file or directory if it is not on an NFS # filesystem. -function safe_chown() { +function safe_chown { _safe_permission_operation chown $@ } # Service wrapper to start services # start_service service-name -function start_service() { +function start_service { if is_ubuntu; then sudo /usr/sbin/service $1 start else @@ -1572,7 +1572,7 @@ function start_service() { # Service wrapper to stop services # stop_service service-name -function stop_service() { +function stop_service { if is_ubuntu; then sudo /usr/sbin/service $1 stop else diff --git a/lib/apache b/lib/apache index 0e5712f56b..2d5e39a65d 100644 --- a/lib/apache +++ b/lib/apache @@ -50,7 +50,7 @@ fi # # Uses global ``APACHE_ENABLED_SERVICES`` # APACHE_ENABLED_SERVICES service [service ...] 
-function is_apache_enabled_service() { +function is_apache_enabled_service { services=$@ for service in ${services}; do [[ ,${APACHE_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 @@ -59,7 +59,7 @@ function is_apache_enabled_service() { } # install_apache_wsgi() - Install Apache server and wsgi module -function install_apache_wsgi() { +function install_apache_wsgi { # Apache installation, because we mark it NOPRIME if is_ubuntu; then # Install apache2, which is NOPRIME'd @@ -79,7 +79,7 @@ function install_apache_wsgi() { } # enable_apache_site() - Enable a particular apache site -function enable_apache_site() { +function enable_apache_site { local site=$@ if is_ubuntu; then sudo a2ensite ${site} @@ -90,7 +90,7 @@ function enable_apache_site() { } # disable_apache_site() - Disable a particular apache site -function disable_apache_site() { +function disable_apache_site { local site=$@ if is_ubuntu; then sudo a2dissite ${site} @@ -100,12 +100,12 @@ function disable_apache_site() { } # start_apache_server() - Start running apache server -function start_apache_server() { +function start_apache_server { start_service $APACHE_NAME } # stop_apache_server() - Stop running apache server -function stop_apache_server() { +function stop_apache_server { if [ -n "$APACHE_NAME" ]; then stop_service $APACHE_NAME else @@ -114,7 +114,7 @@ function stop_apache_server() { } # restart_apache_server -function restart_apache_server() { +function restart_apache_server { restart_service $APACHE_NAME } diff --git a/lib/baremetal b/lib/baremetal index d8cd7e936c..473de0dd39 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -166,7 +166,7 @@ BM_SHELL_IN_A_BOX=${BM_SHELL_IN_A_BOX:-http://shellinabox.googlecode.com/files/s # Check if baremetal is properly enabled # Returns false if VIRT_DRIVER is not baremetal, or if ENABLED_SERVICES # does not contain "baremetal" -function is_baremetal() { +function is_baremetal { if [[ "$ENABLED_SERVICES" =~ 'baremetal' && "$VIRT_DRIVER" = 'baremetal' ]]; 
then return 0 fi @@ -175,7 +175,7 @@ function is_baremetal() { # Install diskimage-builder and shell-in-a-box # so that we can build the deployment kernel & ramdisk -function prepare_baremetal_toolchain() { +function prepare_baremetal_toolchain { git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH git_clone $BM_POSEUR_REPO $BM_POSEUR_DIR $BM_POSEUR_BRANCH @@ -197,7 +197,7 @@ function prepare_baremetal_toolchain() { } # set up virtualized environment for devstack-gate testing -function create_fake_baremetal_env() { +function create_fake_baremetal_env { local bm_poseur="$BM_POSEUR_DIR/bm_poseur" # TODO(deva): add support for >1 VM sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-bridge @@ -211,14 +211,14 @@ function create_fake_baremetal_env() { BM_SECOND_MAC='12:34:56:78:90:12' } -function cleanup_fake_baremetal_env() { +function cleanup_fake_baremetal_env { local bm_poseur="$BM_POSEUR_DIR/bm_poseur" sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-vm sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-bridge } # prepare various directories needed by baremetal hypervisor -function configure_baremetal_nova_dirs() { +function configure_baremetal_nova_dirs { # ensure /tftpboot is prepared sudo mkdir -p /tftpboot sudo mkdir -p /tftpboot/pxelinux.cfg @@ -249,7 +249,7 @@ function configure_baremetal_nova_dirs() { # build deploy kernel+ramdisk, then upload them to glance # this function sets BM_DEPLOY_KERNEL_ID and BM_DEPLOY_RAMDISK_ID -function upload_baremetal_deploy() { +function upload_baremetal_deploy { token=$1 if [ "$BM_BUILD_DEPLOY_RAMDISK" = "True" ]; then @@ -281,7 +281,7 @@ function upload_baremetal_deploy() { # create a basic baremetal flavor, associated with deploy kernel & ramdisk # # Usage: create_baremetal_flavor -function create_baremetal_flavor() { +function create_baremetal_flavor { aki=$1 ari=$2 nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \ @@ -298,7 +298,7 @@ function create_baremetal_flavor() { # Sets KERNEL_ID and RAMDISK_ID # # Usage: 
extract_and_upload_k_and_r_from_image $token $file -function extract_and_upload_k_and_r_from_image() { +function extract_and_upload_k_and_r_from_image { token=$1 file=$2 image_name=$(basename "$file" ".qcow2") @@ -339,7 +339,7 @@ function extract_and_upload_k_and_r_from_image() { # Takes the same parameters, but has some peculiarities which made it # easier to create a separate method, rather than complicate the logic # of the existing function. -function upload_baremetal_image() { +function upload_baremetal_image { local image_url=$1 local token=$2 @@ -429,7 +429,7 @@ function upload_baremetal_image() { DEFAULT_IMAGE_NAME="${IMAGE_NAME%.img}" } -function clear_baremetal_of_all_nodes() { +function clear_baremetal_of_all_nodes { list=$(nova baremetal-node-list | awk -F '| ' 'NR>3 {print $2}' ) for node in $list; do nova baremetal-node-delete $node @@ -440,7 +440,7 @@ function clear_baremetal_of_all_nodes() { # Defaults to using BM_FIRST_MAC and BM_SECOND_MAC if parameters not specified # # Usage: add_baremetal_node -function add_baremetal_node() { +function add_baremetal_node { mac_1=${1:-$BM_FIRST_MAC} mac_2=${2:-$BM_SECOND_MAC} diff --git a/lib/ceilometer b/lib/ceilometer index 6c87d03b13..d20d628247 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -105,18 +105,18 @@ create_ceilometer_accounts() { # cleanup_ceilometer() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_ceilometer() { +function cleanup_ceilometer { mongo ceilometer --eval "db.dropDatabase();" } # configure_ceilometerclient() - Set config files, create data dirs, etc -function configure_ceilometerclient() { +function configure_ceilometerclient { setup_develop $CEILOMETERCLIENT_DIR sudo install -D -m 0644 -o $STACK_USER {$CEILOMETERCLIENT_DIR/tools/,/etc/bash_completion.d/}ceilometer.bash_completion } # configure_ceilometer() - Set config files, create data dirs, etc -function configure_ceilometer() { +function 
configure_ceilometer { setup_develop $CEILOMETER_DIR [ ! -d $CEILOMETER_CONF_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_CONF_DIR @@ -162,7 +162,7 @@ function configure_ceilometer() { fi } -function configure_mongodb() { +function configure_mongodb { if is_fedora; then # install mongodb client install_package mongodb @@ -174,7 +174,7 @@ function configure_mongodb() { } # init_ceilometer() - Initialize etc. -function init_ceilometer() { +function init_ceilometer { # Create cache dir sudo mkdir -p $CEILOMETER_AUTH_CACHE_DIR sudo chown $STACK_USER $CEILOMETER_AUTH_CACHE_DIR @@ -187,17 +187,17 @@ function init_ceilometer() { } # install_ceilometer() - Collect source and prepare -function install_ceilometer() { +function install_ceilometer { git_clone $CEILOMETER_REPO $CEILOMETER_DIR $CEILOMETER_BRANCH } # install_ceilometerclient() - Collect source and prepare -function install_ceilometerclient() { +function install_ceilometerclient { git_clone $CEILOMETERCLIENT_REPO $CEILOMETERCLIENT_DIR $CEILOMETERCLIENT_BRANCH } # start_ceilometer() - Start running processes, including screen -function start_ceilometer() { +function start_ceilometer { if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" fi @@ -216,7 +216,7 @@ function start_ceilometer() { } # stop_ceilometer() - Stop running processes -function stop_ceilometer() { +function stop_ceilometer { # Kill the ceilometer screen windows for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do screen_stop $serv diff --git a/lib/cinder b/lib/cinder index e8f30b683c..d003f5dc7b 100644 --- a/lib/cinder +++ b/lib/cinder @@ -102,7 +102,7 @@ function is_cinder_enabled { # _clean_lvm_lv removes all cinder LVM volumes # # Usage: _clean_lvm_lv $VOLUME_GROUP $VOLUME_NAME_PREFIX -function _clean_lvm_lv() { +function 
_clean_lvm_lv { local vg=$1 local lv_prefix=$2 @@ -119,7 +119,7 @@ function _clean_lvm_lv() { # volume group used by cinder # # Usage: _clean_lvm_backing_file() $VOLUME_GROUP -function _clean_lvm_backing_file() { +function _clean_lvm_backing_file { local vg=$1 # if there is no logical volume left, it's safe to attempt a cleanup @@ -136,7 +136,7 @@ function _clean_lvm_backing_file() { # cleanup_cinder() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_cinder() { +function cleanup_cinder { # ensure the volume group is cleared up because fails might # leave dead volumes in the group TARGETS=$(sudo tgtadm --op show --mode target) @@ -181,7 +181,7 @@ function cleanup_cinder() { } # configure_cinder_rootwrap() - configure Cinder's rootwrap -function configure_cinder_rootwrap() { +function configure_cinder_rootwrap { # Set the paths of certain binaries CINDER_ROOTWRAP=$(get_rootwrap_location cinder) @@ -212,7 +212,7 @@ function configure_cinder_rootwrap() { } # configure_cinder() - Set config files, create data dirs, etc -function configure_cinder() { +function configure_cinder { if [[ ! 
-d $CINDER_CONF_DIR ]]; then sudo mkdir -p $CINDER_CONF_DIR fi @@ -328,7 +328,7 @@ function configure_cinder() { # service cinder admin # if enabled # Migrated from keystone_data.sh -create_cinder_accounts() { +function create_cinder_accounts { SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") @@ -373,14 +373,14 @@ create_cinder_accounts() { } # create_cinder_cache_dir() - Part of the init_cinder() process -function create_cinder_cache_dir() { +function create_cinder_cache_dir { # Create cache dir sudo mkdir -p $CINDER_AUTH_CACHE_DIR sudo chown $STACK_USER $CINDER_AUTH_CACHE_DIR rm -f $CINDER_AUTH_CACHE_DIR/* } -create_cinder_volume_group() { +function create_cinder_volume_group { # According to the ``CINDER_MULTI_LVM_BACKEND`` value, configure one or two default volumes # group called ``stack-volumes`` (and ``stack-volumes2``) for the volume # service if it (they) does (do) not yet exist. If you don't wish to use a @@ -428,7 +428,7 @@ create_cinder_volume_group() { } # init_cinder() - Initialize database and volume group -function init_cinder() { +function init_cinder { # Force nova volumes off NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//") @@ -464,20 +464,20 @@ function init_cinder() { } # install_cinder() - Collect source and prepare -function install_cinder() { +function install_cinder { git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH setup_develop $CINDER_DIR } # install_cinderclient() - Collect source and prepare -function install_cinderclient() { +function install_cinderclient { git_clone $CINDERCLIENT_REPO $CINDERCLIENT_DIR $CINDERCLIENT_BRANCH setup_develop $CINDERCLIENT_DIR sudo install -D -m 0644 -o $STACK_USER {$CINDERCLIENT_DIR/tools/,/etc/bash_completion.d/}cinder.bash_completion } # apply config.d approach for cinder volumes directory -function _configure_tgt_for_config_d() { +function _configure_tgt_for_config_d { if [[ ! 
-d /etc/tgt/stack.d/ ]]; then sudo ln -sf $CINDER_STATE_PATH/volumes /etc/tgt/stack.d echo "include /etc/tgt/stack.d/*" | sudo tee -a /etc/tgt/targets.conf @@ -485,7 +485,7 @@ function _configure_tgt_for_config_d() { } # start_cinder() - Start running processes, including screen -function start_cinder() { +function start_cinder { if is_service_enabled c-vol; then # Delete any old stack.conf sudo rm -f /etc/tgt/conf.d/stack.conf @@ -529,7 +529,7 @@ function start_cinder() { } # stop_cinder() - Stop running processes -function stop_cinder() { +function stop_cinder { # Kill the cinder screen windows for serv in c-api c-bak c-sch c-vol; do screen_stop $serv diff --git a/lib/cinder_plugins/XenAPINFS b/lib/cinder_plugins/XenAPINFS index 72e1c1398c..fa10715bdf 100644 --- a/lib/cinder_plugins/XenAPINFS +++ b/lib/cinder_plugins/XenAPINFS @@ -27,7 +27,7 @@ set +o xtrace # ------------ # configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver() { +function configure_cinder_driver { iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver" iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" diff --git a/lib/cinder_plugins/glusterfs b/lib/cinder_plugins/glusterfs index a0c5ae8d5e..b4196e4738 100644 --- a/lib/cinder_plugins/glusterfs +++ b/lib/cinder_plugins/glusterfs @@ -27,7 +27,7 @@ set +o xtrace # ------------ # configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver() { +function configure_cinder_driver { # To use glusterfs, set the following in localrc: # CINDER_DRIVER=glusterfs # CINDER_GLUSTERFS_SHARES="127.0.0.1:/vol1;127.0.0.1:/vol2" diff --git a/lib/cinder_plugins/nfs b/lib/cinder_plugins/nfs index ea2c9ce665..2d9d875245 100644 --- a/lib/cinder_plugins/nfs +++ b/lib/cinder_plugins/nfs @@ -27,7 +27,7 @@ set +o xtrace # 
------------ # configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver() { +function configure_cinder_driver { iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.nfs.NfsDriver" iniset $CINDER_CONF DEFAULT nfs_shares_config "$CINDER_CONF_DIR/nfs_shares.conf" echo "$CINDER_NFS_SERVERPATH" | sudo tee "$CINDER_CONF_DIR/nfs_shares.conf" diff --git a/lib/cinder_plugins/sheepdog b/lib/cinder_plugins/sheepdog index 4435932371..30c60c6efe 100644 --- a/lib/cinder_plugins/sheepdog +++ b/lib/cinder_plugins/sheepdog @@ -27,7 +27,7 @@ set +o xtrace # ------------ # configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver() { +function configure_cinder_driver { iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" } diff --git a/lib/cinder_plugins/solidfire b/lib/cinder_plugins/solidfire index 47c113e1a2..2c970b5adf 100644 --- a/lib/cinder_plugins/solidfire +++ b/lib/cinder_plugins/solidfire @@ -27,7 +27,7 @@ set +o xtrace # ------------ # configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver() { +function configure_cinder_driver { # To use solidfire, set the following in localrc: # CINDER_DRIVER=solidfire # SAN_IP= diff --git a/lib/cinder_plugins/vsphere b/lib/cinder_plugins/vsphere index c8cab6a8c1..436b060377 100644 --- a/lib/cinder_plugins/vsphere +++ b/lib/cinder_plugins/vsphere @@ -27,7 +27,7 @@ set +o xtrace # ------------ # configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver() { +function configure_cinder_driver { iniset $CINDER_CONF DEFAULT vmware_host_ip "$VMWAREAPI_IP" iniset $CINDER_CONF DEFAULT vmware_host_username "$VMWAREAPI_USER" iniset $CINDER_CONF DEFAULT vmware_host_password "$VMWAREAPI_PASSWORD" diff --git a/lib/config b/lib/config index 1678aec3fc..552aeb0ad1 100644 --- a/lib/config +++ b/lib/config @@ -25,7 +25,7 @@ 
CONFIG_AWK_CMD=${CONFIG_AWK_CMD:-awk} # Get the section for the specific group and config file # get_meta_section infile group configfile -function get_meta_section() { +function get_meta_section { local file=$1 local matchgroup=$2 local configfile=$3 @@ -57,7 +57,7 @@ function get_meta_section() { # Get a list of config files for a specific group # get_meta_section_files infile group -function get_meta_section_files() { +function get_meta_section_files { local file=$1 local matchgroup=$2 @@ -77,7 +77,7 @@ function get_meta_section_files() { # Merge the contents of a meta-config file into its destination config file # If configfile does not exist it will be created. # merge_config_file infile group configfile -function merge_config_file() { +function merge_config_file { local file=$1 local matchgroup=$2 local configfile=$3 @@ -106,7 +106,7 @@ function merge_config_file() { # Merge all of the files specified by group # merge_config_group infile group [group ...] -function merge_config_group() { +function merge_config_group { local localfile=$1; shift local matchgroups=$@ diff --git a/lib/gantt b/lib/gantt index 832d7590df..8db2ca1406 100644 --- a/lib/gantt +++ b/lib/gantt @@ -47,42 +47,42 @@ GANTT_BIN_DIR=$(get_python_exec_prefix) # cleanup_gantt() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_gantt() { +function cleanup_gantt { echo "Cleanup Gantt" } # configure_gantt() - Set config files, create data dirs, etc -function configure_gantt() { +function configure_gantt { echo "Configure Gantt" } # init_gantt() - Initialize database and volume group -function init_gantt() { +function init_gantt { echo "Initialize Gantt" } # install_gantt() - Collect source and prepare -function install_gantt() { +function install_gantt { git_clone $GANTT_REPO $GANTT_DIR $GANTT_BRANCH setup_develop $GANTT_DIR } # install_ganttclient() - Collect source and prepare -function install_ganttclient() { +function 
install_ganttclient { echo "Install Gantt Client" # git_clone $GANTTCLIENT_REPO $GANTTCLIENT_DIR $GANTTCLIENT_BRANCH # setup_develop $GANTTCLIENT_DIR } # start_gantt() - Start running processes, including screen -function start_gantt() { +function start_gantt { if is_service_enabled gantt; then screen_it gantt "cd $GANTT_DIR && $GANTT_BIN_DIR/gantt-scheduler --config-file $GANTT_CONF" fi } # stop_gantt() - Stop running processes -function stop_gantt() { +function stop_gantt { echo "Stop Gantt" screen_stop gantt } diff --git a/lib/glance b/lib/glance index 1ebeeb3b2e..8a4c21b3f2 100644 --- a/lib/glance +++ b/lib/glance @@ -68,14 +68,14 @@ function is_glance_enabled { # cleanup_glance() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_glance() { +function cleanup_glance { # kill instances (nova) # delete image files (glance) sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $GLANCE_AUTH_CACHE_DIR } # configure_glance() - Set config files, create data dirs, etc -function configure_glance() { +function configure_glance { if [[ ! -d $GLANCE_CONF_DIR ]]; then sudo mkdir -p $GLANCE_CONF_DIR fi @@ -160,7 +160,7 @@ function configure_glance() { } # create_glance_cache_dir() - Part of the init_glance() process -function create_glance_cache_dir() { +function create_glance_cache_dir { # Create cache dir sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/api @@ -171,7 +171,7 @@ function create_glance_cache_dir() { } # init_glance() - Initialize databases, etc. 
-function init_glance() { +function init_glance { # Delete existing images rm -rf $GLANCE_IMAGE_DIR mkdir -p $GLANCE_IMAGE_DIR @@ -190,19 +190,19 @@ function init_glance() { } # install_glanceclient() - Collect source and prepare -function install_glanceclient() { +function install_glanceclient { git_clone $GLANCECLIENT_REPO $GLANCECLIENT_DIR $GLANCECLIENT_BRANCH setup_develop $GLANCECLIENT_DIR } # install_glance() - Collect source and prepare -function install_glance() { +function install_glance { git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH setup_develop $GLANCE_DIR } # start_glance() - Start running processes, including screen -function start_glance() { +function start_glance { screen_it g-reg "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." @@ -212,7 +212,7 @@ function start_glance() { } # stop_glance() - Stop running processes -function stop_glance() { +function stop_glance { # Kill the Glance screen windows screen_stop g-api screen_stop g-reg diff --git a/lib/heat b/lib/heat index 972c35ce72..d0c0302016 100644 --- a/lib/heat +++ b/lib/heat @@ -47,14 +47,14 @@ TEMPEST_SERVICES+=,heat # cleanup_heat() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_heat() { +function cleanup_heat { sudo rm -rf $HEAT_AUTH_CACHE_DIR sudo rm -rf $HEAT_ENV_DIR sudo rm -rf $HEAT_TEMPLATES_DIR } # configure_heat() - Set config files, create data dirs, etc -function configure_heat() { +function configure_heat { setup_develop $HEAT_DIR if [[ ! 
-d $HEAT_CONF_DIR ]]; then @@ -137,7 +137,7 @@ function configure_heat() { } # init_heat() - Initialize database -function init_heat() { +function init_heat { # (re)create heat database recreate_database heat utf8 @@ -147,26 +147,26 @@ function init_heat() { } # create_heat_cache_dir() - Part of the init_heat() process -function create_heat_cache_dir() { +function create_heat_cache_dir { # Create cache dirs sudo mkdir -p $HEAT_AUTH_CACHE_DIR sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR } # install_heatclient() - Collect source and prepare -function install_heatclient() { +function install_heatclient { git_clone $HEATCLIENT_REPO $HEATCLIENT_DIR $HEATCLIENT_BRANCH setup_develop $HEATCLIENT_DIR sudo install -D -m 0644 -o $STACK_USER {$HEATCLIENT_DIR/tools/,/etc/bash_completion.d/}heat.bash_completion } # install_heat() - Collect source and prepare -function install_heat() { +function install_heat { git_clone $HEAT_REPO $HEAT_DIR $HEAT_BRANCH } # start_heat() - Start running processes, including screen -function start_heat() { +function start_heat { screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF" screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-file=$HEAT_CONF" screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-file=$HEAT_CONF" @@ -174,7 +174,7 @@ function start_heat() { } # stop_heat() - Stop running processes -function stop_heat() { +function stop_heat { # Kill the screen windows for serv in h-eng h-api h-api-cfn h-api-cw; do screen_stop $serv @@ -198,7 +198,7 @@ function disk_image_create { # create_heat_accounts() - Set up common required heat accounts # Note this is in addition to what is in files/keystone_data.sh -function create_heat_accounts() { +function create_heat_accounts { # Note we have to pass token/endpoint here because the current endpoint and # version negotiation in OSC means just --os-identity-api-version=3 won't work KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" 
diff --git a/lib/horizon b/lib/horizon index 2f5795d1ca..27c2d26a01 100644 --- a/lib/horizon +++ b/lib/horizon @@ -39,7 +39,7 @@ TEMPEST_SERVICES+=,horizon # --------- # utility method of setting python option -function _horizon_config_set() { +function _horizon_config_set { local file=$1 local section=$2 local option=$3 @@ -64,7 +64,7 @@ function _horizon_config_set() { # cleanup_horizon() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_horizon() { +function cleanup_horizon { if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then # If ``/usr/bin/node`` points into ``$DEST`` # we installed it via ``install_nodejs`` @@ -75,12 +75,12 @@ function cleanup_horizon() { } # configure_horizon() - Set config files, create data dirs, etc -function configure_horizon() { +function configure_horizon { setup_develop $HORIZON_DIR } # init_horizon() - Initialize databases, etc. -function init_horizon() { +function init_horizon { # ``local_settings.py`` is used to override horizon default settings. 
local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py cp $HORIZON_SETTINGS $local_settings @@ -143,7 +143,7 @@ function init_horizon() { } # install_horizon() - Collect source and prepare -function install_horizon() { +function install_horizon { # Apache installation, because we mark it NOPRIME install_apache_wsgi @@ -151,13 +151,13 @@ function install_horizon() { } # start_horizon() - Start running processes, including screen -function start_horizon() { +function start_horizon { restart_apache_server screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log" } # stop_horizon() - Stop running processes (non-screen) -function stop_horizon() { +function stop_horizon { stop_apache_server } diff --git a/lib/infra b/lib/infra index 0dcf0ad980..7f70ff2787 100644 --- a/lib/infra +++ b/lib/infra @@ -27,7 +27,7 @@ REQUIREMENTS_DIR=$DEST/requirements # ------------ # unfubar_setuptools() - Unbreak the giant mess that is the current state of setuptools -function unfubar_setuptools() { +function unfubar_setuptools { # this is a giant game of who's on first, but it does consistently work # there is hope that upstream python packaging fixes this in the future echo_summary "Unbreaking setuptools" @@ -40,7 +40,7 @@ function unfubar_setuptools() { # install_infra() - Collect source and prepare -function install_infra() { +function install_infra { # bring down global requirements git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH diff --git a/lib/ironic b/lib/ironic index 607b13125a..177188dd06 100644 --- a/lib/ironic +++ b/lib/ironic @@ -57,25 +57,25 @@ function is_ironic_enabled { } # install_ironic() - Collect source and prepare -function install_ironic() { +function install_ironic { git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH setup_develop $IRONIC_DIR } # install_ironicclient() - Collect sources and prepare -function install_ironicclient() { +function install_ironicclient { git_clone $IRONICCLIENT_REPO 
$IRONICCLIENT_DIR $IRONICCLIENT_BRANCH setup_develop $IRONICCLIENT_DIR } # cleanup_ironic() - Remove residual data files, anything left over from previous # runs that would need to clean up. -function cleanup_ironic() { +function cleanup_ironic { sudo rm -rf $IRONIC_AUTH_CACHE_DIR } # configure_ironic() - Set config files, create data dirs, etc -function configure_ironic() { +function configure_ironic { if [[ ! -d $IRONIC_CONF_DIR ]]; then sudo mkdir -p $IRONIC_CONF_DIR fi @@ -101,7 +101,7 @@ function configure_ironic() { # configure_ironic_api() - Is used by configure_ironic(). Performs # API specific configuration. -function configure_ironic_api() { +function configure_ironic_api { iniset $IRONIC_CONF_FILE DEFAULT auth_strategy keystone iniset $IRONIC_CONF_FILE DEFAULT policy_file $IRONIC_POLICY_JSON iniset $IRONIC_CONF_FILE keystone_authtoken auth_host $KEYSTONE_AUTH_HOST @@ -120,7 +120,7 @@ function configure_ironic_api() { # configure_ironic_conductor() - Is used by configure_ironic(). # Sets conductor specific settings. 
-function configure_ironic_conductor() { +function configure_ironic_conductor { cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR @@ -128,7 +128,7 @@ function configure_ironic_conductor() { } # create_ironic_cache_dir() - Part of the init_ironic() process -function create_ironic_cache_dir() { +function create_ironic_cache_dir { # Create cache dir sudo mkdir -p $IRONIC_AUTH_CACHE_DIR/api sudo chown $STACK_USER $IRONIC_AUTH_CACHE_DIR/api @@ -143,7 +143,7 @@ function create_ironic_cache_dir() { # Tenant User Roles # ------------------------------------------------------------------ # service ironic admin # if enabled -create_ironic_accounts() { +function create_ironic_accounts { SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") @@ -178,7 +178,7 @@ create_ironic_accounts() { # init_ironic() - Initialize databases, etc. -function init_ironic() { +function init_ironic { # (Re)create ironic database recreate_database ironic utf8 @@ -192,7 +192,7 @@ function init_ironic() { } # start_ironic() - Start running processes, including screen -function start_ironic() { +function start_ironic { # Start Ironic API server, if enabled. if is_service_enabled ir-api; then start_ironic_api @@ -206,7 +206,7 @@ function start_ironic() { # start_ironic_api() - Used by start_ironic(). # Starts Ironic API server. -function start_ironic_api() { +function start_ironic_api { screen_it ir-api "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE" echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then @@ -216,13 +216,13 @@ function start_ironic_api() { # start_ironic_conductor() - Used by start_ironic(). # Starts Ironic conductor. 
-function start_ironic_conductor() { +function start_ironic_conductor { screen_it ir-cond "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-conductor --config-file=$IRONIC_CONF_FILE" # TODO(romcheg): Find a way to check whether the conductor has started. } # stop_ironic() - Stop running processes -function stop_ironic() { +function stop_ironic { # Kill the Ironic screen windows screen -S $SCREEN_NAME -p ir-api -X kill screen -S $SCREEN_NAME -p ir-cond -X kill diff --git a/lib/keystone b/lib/keystone index 73af1d356d..0548c24e87 100644 --- a/lib/keystone +++ b/lib/keystone @@ -90,7 +90,7 @@ fi # --------- # cleanup_keystone() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_keystone() { +function cleanup_keystone { # kill instances (nova) # delete image files (glance) # This function intentionally left blank @@ -98,14 +98,14 @@ function cleanup_keystone() { } # _cleanup_keystone_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file -function _cleanup_keystone_apache_wsgi() { +function _cleanup_keystone_apache_wsgi { sudo rm -f $KEYSTONE_WSGI_DIR/*.wsgi disable_apache_site keystone sudo rm -f /etc/$APACHE_NAME/$APACHE_CONF_DIR/keystone } # _config_keystone_apache_wsgi() - Set WSGI config files of Keystone -function _config_keystone_apache_wsgi() { +function _config_keystone_apache_wsgi { sudo mkdir -p $KEYSTONE_WSGI_DIR # copy proxy vhost and wsgi file @@ -125,7 +125,7 @@ function _config_keystone_apache_wsgi() { } # configure_keystone() - Set config files, create data dirs, etc -function configure_keystone() { +function configure_keystone { if [[ ! 
-d $KEYSTONE_CONF_DIR ]]; then sudo mkdir -p $KEYSTONE_CONF_DIR fi @@ -272,7 +272,7 @@ function configure_keystone() { # invisible_to_admin demo Member # Migrated from keystone_data.sh -create_keystone_accounts() { +function create_keystone_accounts { # admin ADMIN_TENANT=$(openstack project create \ @@ -346,14 +346,14 @@ create_keystone_accounts() { # Configure the API version for the OpenStack projects. # configure_API_version conf_file version -function configure_API_version() { +function configure_API_version { local conf_file=$1 local api_version=$2 iniset $conf_file keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$api_version } # init_keystone() - Initialize databases, etc. -function init_keystone() { +function init_keystone { if is_service_enabled ldap; then init_ldap fi @@ -377,14 +377,14 @@ function init_keystone() { } # install_keystoneclient() - Collect source and prepare -function install_keystoneclient() { +function install_keystoneclient { git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH setup_develop $KEYSTONECLIENT_DIR sudo install -D -m 0644 -o $STACK_USER {$KEYSTONECLIENT_DIR/tools/,/etc/bash_completion.d/}keystone.bash_completion } # install_keystone() - Collect source and prepare -function install_keystone() { +function install_keystone { # only install ldap if the service has been enabled if is_service_enabled ldap; then install_ldap @@ -408,7 +408,7 @@ function install_keystone() { } # start_keystone() - Start running processes, including screen -function start_keystone() { +function start_keystone { # Get right service port for testing local service_port=$KEYSTONE_SERVICE_PORT if is_service_enabled tls-proxy; then @@ -436,7 +436,7 @@ function start_keystone() { } # stop_keystone() - Stop running processes -function stop_keystone() { +function stop_keystone { # Kill the Keystone screen window screen_stop key } diff --git a/lib/ldap b/lib/ldap index 
e4bd41624d..51d02519af 100644 --- a/lib/ldap +++ b/lib/ldap @@ -49,7 +49,7 @@ fi # Perform common variable substitutions on the data files # _ldap_varsubst file -function _ldap_varsubst() { +function _ldap_varsubst { local infile=$1 sed -e " s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER| @@ -62,7 +62,7 @@ function _ldap_varsubst() { } # clean_ldap() - Remove ldap server -function cleanup_ldap() { +function cleanup_ldap { uninstall_package $(get_packages ldap) if is_ubuntu; then uninstall_package slapd ldap-utils libslp1 @@ -76,7 +76,7 @@ function cleanup_ldap() { # init_ldap # init_ldap() - Initialize databases, etc. -function init_ldap() { +function init_ldap { local keystone_ldif TMP_LDAP_DIR=$(mktemp -d -t ldap.$$.XXXXXXXXXX) @@ -106,7 +106,7 @@ function init_ldap() { # install_ldap # install_ldap() - Collect source and prepare -function install_ldap() { +function install_ldap { echo "Installing LDAP inside function" echo "os_VENDOR is $os_VENDOR" @@ -143,17 +143,17 @@ function install_ldap() { } # start_ldap() - Start LDAP -function start_ldap() { +function start_ldap { sudo service $LDAP_SERVICE_NAME restart } # stop_ldap() - Stop LDAP -function stop_ldap() { +function stop_ldap { sudo service $LDAP_SERVICE_NAME stop } # clear_ldap_state() - Clear LDAP State -function clear_ldap_state() { +function clear_ldap_state { ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN" } diff --git a/lib/marconi b/lib/marconi index 1c8be49291..8cfc55c1dd 100644 --- a/lib/marconi +++ b/lib/marconi @@ -73,19 +73,19 @@ function is_marconi_enabled { # cleanup_marconi() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_marconi() { +function cleanup_marconi { if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
mongo marconi --eval 'db.dropDatabase();'; do sleep 1; done"; then die $LINENO "Mongo DB did not start" fi } # configure_marconiclient() - Set config files, create data dirs, etc -function configure_marconiclient() { +function configure_marconiclient { setup_develop $MARCONICLIENT_DIR } # configure_marconi() - Set config files, create data dirs, etc -function configure_marconi() { +function configure_marconi { setup_develop $MARCONI_DIR [ ! -d $MARCONI_CONF_DIR ] && sudo mkdir -m 755 -p $MARCONI_CONF_DIR @@ -110,7 +110,7 @@ function configure_marconi() { fi } -function configure_mongodb() { +function configure_mongodb { # Set nssize to 2GB. This increases the number of namespaces supported # # per database. if is_ubuntu; then @@ -126,7 +126,7 @@ function configure_mongodb() { } # init_marconi() - Initialize etc. -function init_marconi() { +function init_marconi { # Create cache dir sudo mkdir -p $MARCONI_AUTH_CACHE_DIR sudo chown $STACK_USER $MARCONI_AUTH_CACHE_DIR @@ -134,19 +134,19 @@ function init_marconi() { } # install_marconi() - Collect source and prepare -function install_marconi() { +function install_marconi { git_clone $MARCONI_REPO $MARCONI_DIR $MARCONI_BRANCH setup_develop $MARCONI_DIR } # install_marconiclient() - Collect source and prepare -function install_marconiclient() { +function install_marconiclient { git_clone $MARCONICLIENT_REPO $MARCONICLIENT_DIR $MARCONICLIENT_BRANCH setup_develop $MARCONICLIENT_DIR } # start_marconi() - Start running processes, including screen -function start_marconi() { +function start_marconi { screen_it marconi-server "marconi-server --config-file $MARCONI_CONF" echo "Waiting for Marconi to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then @@ -155,14 +155,14 @@ function start_marconi() { } # stop_marconi() - Stop running processes -function stop_marconi() { +function stop_marconi { # Kill the marconi screen windows for serv in marconi-server; do screen -S $SCREEN_NAME -p $serv -X kill done } -function create_marconi_accounts() { +function create_marconi_accounts { SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") diff --git a/lib/neutron b/lib/neutron index df276c71d5..35575c0379 100644 --- a/lib/neutron +++ b/lib/neutron @@ -253,7 +253,7 @@ function is_neutron_enabled { # configure_neutron() # Set common config for all neutron server and agents. -function configure_neutron() { +function configure_neutron { _configure_neutron_common iniset_rpc_backend neutron $NEUTRON_CONF DEFAULT @@ -289,7 +289,7 @@ function configure_neutron() { _configure_neutron_debug_command } -function create_nova_conf_neutron() { +function create_nova_conf_neutron { iniset $NOVA_CONF DEFAULT network_api_class "nova.network.neutronv2.api.API" iniset $NOVA_CONF DEFAULT neutron_admin_username "$Q_ADMIN_USERNAME" iniset $NOVA_CONF DEFAULT neutron_admin_password "$SERVICE_PASSWORD" @@ -316,7 +316,7 @@ function create_nova_conf_neutron() { } # create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process -function create_neutron_cache_dir() { +function create_neutron_cache_dir { # Create cache dir sudo mkdir -p $NEUTRON_AUTH_CACHE_DIR sudo chown $STACK_USER $NEUTRON_AUTH_CACHE_DIR @@ -330,7 +330,7 @@ function create_neutron_cache_dir() { # service neutron admin # if enabled # Migrated from keystone_data.sh -function create_neutron_accounts() { +function create_neutron_accounts { SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") ADMIN_ROLE=$(openstack 
role list | awk "/ admin / { print \$2 }") @@ -362,7 +362,7 @@ function create_neutron_accounts() { fi } -function create_neutron_initial_network() { +function create_neutron_initial_network { TENANT_ID=$(openstack project list | grep " demo " | get_field 1) die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for demo" @@ -429,27 +429,27 @@ function create_neutron_initial_network() { } # init_neutron() - Initialize databases, etc. -function init_neutron() { +function init_neutron { recreate_database $Q_DB_NAME utf8 # Run Neutron db migrations $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head } # install_neutron() - Collect source and prepare -function install_neutron() { +function install_neutron { git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH setup_develop $NEUTRON_DIR } # install_neutronclient() - Collect source and prepare -function install_neutronclient() { +function install_neutronclient { git_clone $NEUTRONCLIENT_REPO $NEUTRONCLIENT_DIR $NEUTRONCLIENT_BRANCH setup_develop $NEUTRONCLIENT_DIR sudo install -D -m 0644 -o $STACK_USER {$NEUTRONCLIENT_DIR/tools/,/etc/bash_completion.d/}neutron.bash_completion } # install_neutron_agent_packages() - Collect source and prepare -function install_neutron_agent_packages() { +function install_neutron_agent_packages { # install packages that are specific to plugin agent(s) if is_service_enabled q-agt q-dhcp q-l3; then neutron_plugin_install_agent_packages @@ -461,7 +461,7 @@ function install_neutron_agent_packages() { } # Start running processes, including screen -function start_neutron_service_and_check() { +function start_neutron_service_and_check { # build config-file options local cfg_file local CFG_FILE_OPTIONS="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" @@ -477,7 +477,7 @@ function start_neutron_service_and_check() { } # Start running processes, including screen -function start_neutron_agents() { +function 
start_neutron_agents { # Start up the neutron agents if enabled screen_it q-agt "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" screen_it q-dhcp "cd $NEUTRON_DIR && python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE" @@ -510,7 +510,7 @@ function start_neutron_agents() { } # stop_neutron() - Stop running processes (non-screen) -function stop_neutron() { +function stop_neutron { if is_service_enabled q-dhcp; then pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') [ ! -z "$pid" ] && sudo kill -9 $pid @@ -535,7 +535,7 @@ function stop_neutron() { # cleanup_neutron() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_neutron() { +function cleanup_neutron { if is_neutron_ovs_base_plugin; then neutron_ovs_base_cleanup fi @@ -549,7 +549,7 @@ function cleanup_neutron() { # _configure_neutron_common() # Set common config for all neutron server and agents. # This MUST be called before other ``_configure_neutron_*`` functions. -function _configure_neutron_common() { +function _configure_neutron_common { # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find if [[ ! 
-d $NEUTRON_CONF_DIR ]]; then sudo mkdir -p $NEUTRON_CONF_DIR @@ -611,7 +611,7 @@ function _configure_neutron_common() { _neutron_setup_rootwrap } -function _configure_neutron_debug_command() { +function _configure_neutron_debug_command { if [[ "$Q_USE_DEBUG_COMMAND" != "True" ]]; then return fi @@ -628,7 +628,7 @@ function _configure_neutron_debug_command() { neutron_plugin_configure_debug_command } -function _configure_neutron_dhcp_agent() { +function _configure_neutron_dhcp_agent { AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini @@ -652,7 +652,7 @@ function _configure_neutron_dhcp_agent() { neutron_plugin_configure_dhcp_agent } -function _configure_neutron_l3_agent() { +function _configure_neutron_l3_agent { Q_L3_ENABLED=True # for l3-agent, only use per tenant router if we have namespaces Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE @@ -676,7 +676,7 @@ function _configure_neutron_l3_agent() { neutron_plugin_configure_l3_agent } -function _configure_neutron_metadata_agent() { +function _configure_neutron_metadata_agent { AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini @@ -691,30 +691,29 @@ function _configure_neutron_metadata_agent() { } -function _configure_neutron_lbaas() { +function _configure_neutron_lbaas { neutron_agent_lbaas_configure_common neutron_agent_lbaas_configure_agent } -function _configure_neutron_metering() { +function _configure_neutron_metering { neutron_agent_metering_configure_common neutron_agent_metering_configure_agent } -function _configure_neutron_fwaas() { +function _configure_neutron_fwaas { neutron_fwaas_configure_common neutron_fwaas_configure_driver } -function _configure_neutron_vpn() -{ +function _configure_neutron_vpn { neutron_vpn_install_agent_packages neutron_vpn_configure_common } # _configure_neutron_plugin_agent() - Set config files for neutron plugin agent # It is called when q-agt is enabled. 
-function _configure_neutron_plugin_agent() { +function _configure_neutron_plugin_agent { # Specify the default root helper prior to agent configuration to # ensure that an agent's configuration can override the default iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND" @@ -727,7 +726,7 @@ function _configure_neutron_plugin_agent() { # _configure_neutron_service() - Set config files for neutron service # It is called when q-svc is enabled. -function _configure_neutron_service() { +function _configure_neutron_service { Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json @@ -765,7 +764,7 @@ function _configure_neutron_service() { #------------------ # _neutron_service_plugin_class_add() - add service plugin class -function _neutron_service_plugin_class_add() { +function _neutron_service_plugin_class_add { local service_plugin_class=$1 if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class @@ -775,7 +774,7 @@ function _neutron_service_plugin_class_add() { } # _neutron_setup_rootwrap() - configure Neutron's rootwrap -function _neutron_setup_rootwrap() { +function _neutron_setup_rootwrap { if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then return fi @@ -815,7 +814,7 @@ function _neutron_setup_rootwrap() { } # Configures keystone integration for neutron service and agents -function _neutron_setup_keystone() { +function _neutron_setup_keystone { local conf_file=$1 local section=$2 local use_auth_url=$3 @@ -842,7 +841,7 @@ function _neutron_setup_keystone() { fi } -function _neutron_setup_interface_driver() { +function _neutron_setup_interface_driver { # ovs_use_veth needs to be set before the plugin configuration # occurs to allow plugins to override the setting. 
@@ -854,14 +853,14 @@ function _neutron_setup_interface_driver() { # Functions for Neutron Exercises #-------------------------------- -function delete_probe() { +function delete_probe { local from_net="$1" net_id=`_get_net_id $from_net` probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id } -function setup_neutron_debug() { +function setup_neutron_debug { if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME` neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $public_net_id @@ -870,23 +869,23 @@ function setup_neutron_debug() { fi } -function teardown_neutron_debug() { +function teardown_neutron_debug { delete_probe $PUBLIC_NETWORK_NAME delete_probe $PRIVATE_NETWORK_NAME } -function _get_net_id() { +function _get_net_id { neutron --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $1 | awk '{print $2}' } -function _get_probe_cmd_prefix() { +function _get_probe_cmd_prefix { local from_net="$1" net_id=`_get_net_id $from_net` probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" } -function _ping_check_neutron() { +function _ping_check_neutron { local from_net=$1 local ip=$2 local timeout_sec=$3 @@ -908,7 +907,7 @@ function _ping_check_neutron() { } # ssh check -function _ssh_check_neutron() { +function _ssh_check_neutron { local from_net=$1 local key_file=$2 local ip=$3 @@ -934,39 +933,39 @@ for f in $TOP_DIR/lib/neutron_thirdparty/*; do fi done -function _neutron_third_party_do() { +function _neutron_third_party_do { for third_party in 
${NEUTRON_THIRD_PARTIES//,/ }; do ${1}_${third_party} done } # configure_neutron_third_party() - Set config files, create data dirs, etc -function configure_neutron_third_party() { +function configure_neutron_third_party { _neutron_third_party_do configure } # init_neutron_third_party() - Initialize databases, etc. -function init_neutron_third_party() { +function init_neutron_third_party { _neutron_third_party_do init } # install_neutron_third_party() - Collect source and prepare -function install_neutron_third_party() { +function install_neutron_third_party { _neutron_third_party_do install } # start_neutron_third_party() - Start running processes, including screen -function start_neutron_third_party() { +function start_neutron_third_party { _neutron_third_party_do start } # stop_neutron_third_party - Stop running processes (non-screen) -function stop_neutron_third_party() { +function stop_neutron_third_party { _neutron_third_party_do stop } # check_neutron_third_party_integration() - Check that third party integration is sane -function check_neutron_third_party_integration() { +function check_neutron_third_party_integration { _neutron_third_party_do check } diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index 1e4aa00121..4cb0da84ea 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -8,15 +8,15 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base source $TOP_DIR/lib/neutron_thirdparty/bigswitch_floodlight # for third party service specific configuration values -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { : } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { _neutron_ovs_base_install_agent_packages } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/bigswitch 
Q_PLUGIN_CONF_FILENAME=restproxy.ini Q_DB_NAME="restproxy_neutron" @@ -25,23 +25,23 @@ function neutron_plugin_configure_common() { BS_FL_CONTROLLER_TIMEOUT=${BS_FL_CONTROLLER_TIMEOUT:-10} } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { _neutron_ovs_base_configure_debug_command } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { : } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { _neutron_ovs_base_configure_l3_agent } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { : } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { iniset /$Q_PLUGIN_CONF_FILE restproxy servers $BS_FL_CONTROLLERS_PORT iniset /$Q_PLUGIN_CONF_FILE restproxy servertimeout $BS_FL_CONTROLLER_TIMEOUT if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then @@ -49,7 +49,7 @@ function neutron_plugin_configure_service() { fi } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.IVSInterfaceDriver @@ -59,12 +59,12 @@ function neutron_plugin_setup_interface_driver() { } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 1 means False here return 1 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade index 8e18d04984..4443fa7823 100644 --- a/lib/neutron_plugins/brocade +++ b/lib/neutron_plugins/brocade @@ -5,53 +5,53 @@ BRCD_XTRACE=$(set +o | grep xtrace) set +o xtrace -function is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { return 1 } 
-function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { install_package bridge-utils } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/brocade Q_PLUGIN_CONF_FILENAME=brocade.ini Q_DB_NAME="brcd_neutron" Q_PLUGIN_CLASS="neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2" } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent" } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 0 means True here return 0 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco index 8948be6de4..7728eb177f 100644 --- 
a/lib/neutron_plugins/cisco +++ b/lib/neutron_plugins/cisco @@ -27,12 +27,12 @@ NCCLIENT_REPO=${NCCLIENT_REPO:-${GIT_BASE}/CiscoSystems/ncclient.git} NCCLIENT_BRANCH=${NCCLIENT_BRANCH:-master} # This routine put a prefix on an existing function name -function _prefix_function() { +function _prefix_function { declare -F $1 > /dev/null || die "$1 doesn't exist" eval "$(echo "${2}_${1}()"; declare -f ${1} | tail -n +2)" } -function _has_ovs_subplugin() { +function _has_ovs_subplugin { local subplugin for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do if [[ "$subplugin" == "openvswitch" ]]; then @@ -42,7 +42,7 @@ function _has_ovs_subplugin() { return 1 } -function _has_nexus_subplugin() { +function _has_nexus_subplugin { local subplugin for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do if [[ "$subplugin" == "nexus" ]]; then @@ -52,7 +52,7 @@ function _has_nexus_subplugin() { return 1 } -function _has_n1kv_subplugin() { +function _has_n1kv_subplugin { local subplugin for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do if [[ "$subplugin" == "n1kv" ]]; then @@ -64,7 +64,7 @@ function _has_n1kv_subplugin() { # This routine populates the cisco config file with the information for # a particular nexus switch -function _config_switch() { +function _config_switch { local cisco_cfg_file=$1 local switch_ip=$2 local username=$3 @@ -99,7 +99,7 @@ _prefix_function neutron_plugin_setup_interface_driver ovs _prefix_function has_neutron_plugin_security_group ovs # Check the version of the installed ncclient package -function check_ncclient_version() { +function check_ncclient_version { python << EOF version = '$NCCLIENT_VERSION' import sys @@ -115,13 +115,13 @@ EOF } # Install the ncclient package -function install_ncclient() { +function install_ncclient { git_clone $NCCLIENT_REPO $NCCLIENT_DIR $NCCLIENT_BRANCH (cd $NCCLIENT_DIR; sudo python setup.py install) } # Check if the required version of ncclient has been installed -function is_ncclient_installed() { +function 
is_ncclient_installed { # Check if the Cisco ncclient repository exists if [[ -d $NCCLIENT_DIR ]]; then remotes=$(cd $NCCLIENT_DIR; git remote -v | grep fetch | awk '{ print $2}') @@ -144,7 +144,7 @@ function is_ncclient_installed() { return 0 } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { if _has_ovs_subplugin; then ovs_has_neutron_plugin_security_group else @@ -152,14 +152,14 @@ function has_neutron_plugin_security_group() { fi } -function is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { # Cisco uses OVS if openvswitch subplugin is deployed _has_ovs_subplugin return } # populate required nova configuration parameters -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { if _has_ovs_subplugin; then ovs_neutron_plugin_create_nova_conf else @@ -167,13 +167,13 @@ function neutron_plugin_create_nova_conf() { fi } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { # Cisco plugin uses openvswitch to operate in one of its configurations ovs_neutron_plugin_install_agent_packages } # Configure common parameters -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { # setup default subplugins if [ ! 
-v Q_CISCO_PLUGIN_SUBPLUGINS ]; then declare -ga Q_CISCO_PLUGIN_SUBPLUGINS @@ -191,23 +191,23 @@ function neutron_plugin_configure_common() { Q_DB_NAME=cisco_neutron } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { if _has_ovs_subplugin; then ovs_neutron_plugin_configure_debug_command fi } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { if _has_ovs_subplugin; then ovs_neutron_plugin_configure_l3_agent fi } -function _configure_nexus_subplugin() { +function _configure_nexus_subplugin { local cisco_cfg_file=$1 # Install a known compatible ncclient from the Cisco repository if necessary @@ -252,7 +252,7 @@ function _configure_nexus_subplugin() { } # Configure n1kv plugin -function _configure_n1kv_subplugin() { +function _configure_n1kv_subplugin { local cisco_cfg_file=$1 # populate the cisco plugin cfg file with the VSM information @@ -270,13 +270,13 @@ function _configure_n1kv_subplugin() { _neutron_ovs_base_setup_bridge $OVS_BRIDGE } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { if _has_ovs_subplugin; then ovs_neutron_plugin_configure_plugin_agent fi } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { local subplugin local cisco_cfg_file @@ -318,7 +318,7 @@ function neutron_plugin_configure_service() { fi } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane index 325e9397e6..62f9737e51 100644 --- a/lib/neutron_plugins/embrane +++ 
b/lib/neutron_plugins/embrane @@ -7,7 +7,7 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/openvswitch -save_function() { +function save_function { local ORIG_FUNC=$(declare -f $1) local NEW_FUNC="$2${ORIG_FUNC#$1}" eval "$NEW_FUNC" @@ -15,14 +15,14 @@ save_function() { save_function neutron_plugin_configure_service _neutron_plugin_configure_service -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/embrane Q_PLUGIN_CONF_FILENAME=heleos_conf.ini Q_DB_NAME="ovs_neutron" Q_PLUGIN_CLASS="neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin" } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { _neutron_plugin_configure_service iniset /$Q_PLUGIN_CONF_FILE heleos esm_mgmt $HELEOS_ESM_MGMT iniset /$Q_PLUGIN_CONF_FILE heleos admin_username $HELEOS_ADMIN_USERNAME diff --git a/lib/neutron_plugins/linuxbridge b/lib/neutron_plugins/linuxbridge index 37bc748c37..362fd5b39e 100644 --- a/lib/neutron_plugins/linuxbridge +++ b/lib/neutron_plugins/linuxbridge @@ -7,14 +7,14 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/linuxbridge_agent -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/linuxbridge Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini Q_DB_NAME="neutron_linux_bridge" Q_PLUGIN_CLASS="neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2" } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE vlans tenant_network_type vlan else @@ -47,7 +47,7 @@ function neutron_plugin_configure_service() { done } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 0 means True here return 0 } diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index 
85e8c085be..74799e477c 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -5,33 +5,33 @@ PLUGIN_XTRACE=$(set +o | grep xtrace) set +o xtrace -function is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { # linuxbridge doesn't use OVS return 1 } -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { : } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { install_package bridge-utils } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { # Setup physical network interface mappings. Override # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more # complex physical network configurations. 
@@ -63,12 +63,12 @@ function neutron_plugin_configure_plugin_agent() { done } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index dd3b2baeca..742e3b2f0f 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -9,32 +9,32 @@ MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-ap MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -function is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { # MidoNet does not use l3-agent # 0 means True here return 1 } -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { : } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/midonet Q_PLUGIN_CONF_FILENAME=midonet.ini Q_DB_NAME="neutron_midonet" Q_PLUGIN_CLASS="neutron.plugins.midonet.plugin.MidonetPluginV2" } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { : } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { DHCP_DRIVER=${DHCP_DRIVER:-"neutron.plugins.midonet.agent.midonet_driver.DhcpNoOpDriver"} neutron_plugin_setup_interface_driver $Q_DHCP_CONF_FILE iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_driver $DHCP_DRIVER @@ -42,15 +42,15 @@ function neutron_plugin_configure_dhcp_agent() { iniset $Q_DHCP_CONF_FILE 
DEFAULT enable_isolated_metadata True } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { die $LINENO "q-l3 must not be executed with MidoNet plugin!" } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { die $LINENO "q-agt must not be executed with MidoNet plugin!" } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { if [[ "$MIDONET_API_URL" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE MIDONET midonet_uri $MIDONET_API_URL fi @@ -68,17 +68,17 @@ function neutron_plugin_configure_service() { Q_L3_ROUTER_PER_TENANT=True } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.MidonetInterfaceDriver } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 0 means True here return 0 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { # 0 means True here return 1 } diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 4ceabe765d..e985dcb4a5 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -33,7 +33,7 @@ Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS:-} # L3 Plugin to load for ML2 ML2_L3_PLUGIN=${ML2_L3_PLUGIN:-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin} -function populate_ml2_config() { +function populate_ml2_config { CONF=$1 SECTION=$2 OPTS=$3 @@ -47,7 +47,7 @@ function populate_ml2_config() { done } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ml2 Q_PLUGIN_CONF_FILENAME=ml2_conf.ini Q_DB_NAME="neutron_ml2" @@ -57,7 +57,7 @@ function neutron_plugin_configure_common() { _neutron_service_plugin_class_add $ML2_L3_PLUGIN } -function 
neutron_plugin_configure_service() { +function neutron_plugin_configure_service { if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then Q_SRV_EXTRA_OPTS+=(tenant_network_types=$Q_ML2_TENANT_NETWORK_TYPE) elif [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then @@ -114,7 +114,7 @@ function neutron_plugin_configure_service() { populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vlan $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { return 0 } diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec index 1cb2fef533..6d4bfca244 100644 --- a/lib/neutron_plugins/nec +++ b/lib/neutron_plugins/nec @@ -22,11 +22,11 @@ OFC_RETRY_INTERVAL=${OFC_RETRY_INTERVAL:-1} source $TOP_DIR/lib/neutron_plugins/ovs_base -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { _neutron_ovs_base_configure_nova_vif_driver } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { # SKIP_OVS_INSTALL is useful when we want to use Open vSwitch whose # version is different from the version provided by the distribution. 
if [[ "$SKIP_OVS_INSTALL" = "True" ]]; then @@ -36,26 +36,26 @@ function neutron_plugin_install_agent_packages() { _neutron_ovs_base_install_agent_packages } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nec Q_PLUGIN_CONF_FILENAME=nec.ini Q_DB_NAME="neutron_nec" Q_PLUGIN_CLASS="neutron.plugins.nec.nec_plugin.NECPluginV2" } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { _neutron_ovs_base_configure_debug_command } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { : } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { _neutron_ovs_base_configure_l3_agent } -function _quantum_plugin_setup_bridge() { +function _quantum_plugin_setup_bridge { if [[ "$SKIP_OVS_BRIDGE_SETUP" = "True" ]]; then return fi @@ -72,7 +72,7 @@ function _quantum_plugin_setup_bridge() { _neutron_setup_ovs_tunnels $OVS_BRIDGE } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { _quantum_plugin_setup_bridge AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nec-agent" @@ -80,7 +80,7 @@ function neutron_plugin_configure_plugin_agent() { _neutron_ovs_base_configure_firewall_driver } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { iniset $NEUTRON_CONF DEFAULT api_extensions_path neutron/plugins/nec/extensions/ iniset /$Q_PLUGIN_CONF_FILE ofc host $OFC_API_HOST iniset /$Q_PLUGIN_CONF_FILE ofc port $OFC_API_PORT @@ -91,7 +91,7 @@ function neutron_plugin_configure_service() { _neutron_ovs_base_configure_firewall_driver } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver iniset $conf_file DEFAULT ovs_use_veth True @@ -101,7 +101,7 @@ 
function neutron_plugin_setup_interface_driver() { # --------------------------- # Setup OVS tunnel manually -function _neutron_setup_ovs_tunnels() { +function _neutron_setup_ovs_tunnels { local bridge=$1 local id=0 GRE_LOCAL_IP=${GRE_LOCAL_IP:-$HOST_IP} @@ -117,12 +117,12 @@ function _neutron_setup_ovs_tunnels() { fi } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 0 means True here return 0 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_plugins/openvswitch b/lib/neutron_plugins/openvswitch index f99eb383d8..bdbc5a9367 100644 --- a/lib/neutron_plugins/openvswitch +++ b/lib/neutron_plugins/openvswitch @@ -7,14 +7,14 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/openvswitch_agent -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/openvswitch Q_PLUGIN_CONF_FILENAME=ovs_neutron_plugin.ini Q_DB_NAME="ovs_neutron" Q_PLUGIN_CLASS="neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2" } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE ovs tenant_network_type gre iniset /$Q_PLUGIN_CONF_FILE ovs tunnel_id_ranges $TENANT_TUNNEL_RANGES @@ -52,7 +52,7 @@ function neutron_plugin_configure_service() { done } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { return 0 } diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index 46c2a5c6e2..3a2bdc316a 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -7,7 +7,7 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base -function neutron_plugin_create_nova_conf() { +function 
neutron_plugin_create_nova_conf { _neutron_ovs_base_configure_nova_vif_driver if [ "$VIRT_DRIVER" = 'xenserver' ]; then iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver @@ -17,24 +17,24 @@ function neutron_plugin_create_nova_conf() { fi } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { _neutron_ovs_base_install_agent_packages } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { _neutron_ovs_base_configure_debug_command } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { _neutron_ovs_base_configure_l3_agent iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { # Setup integration bridge _neutron_ovs_base_setup_bridge $OVS_BRIDGE _neutron_ovs_base_configure_firewall_driver @@ -118,12 +118,12 @@ function neutron_plugin_configure_plugin_agent() { done } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 89db29d07f..0a2ba58fbb 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -8,19 +8,19 @@ set +o xtrace OVS_BRIDGE=${OVS_BRIDGE:-br-int} PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} -function 
is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { # Yes, we use OVS. return 0 } -function _neutron_ovs_base_setup_bridge() { +function _neutron_ovs_base_setup_bridge { local bridge=$1 neutron-ovs-cleanup sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge } -function neutron_ovs_base_cleanup() { +function neutron_ovs_base_cleanup { # remove all OVS ports that look like Neutron created ports for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do sudo ovs-vsctl del-port ${port} @@ -32,7 +32,7 @@ function neutron_ovs_base_cleanup() { done } -function _neutron_ovs_base_install_agent_packages() { +function _neutron_ovs_base_install_agent_packages { local kernel_version # Install deps # FIXME add to ``files/apts/neutron``, but don't install if not needed! @@ -50,11 +50,11 @@ function _neutron_ovs_base_install_agent_packages() { fi } -function _neutron_ovs_base_configure_debug_command() { +function _neutron_ovs_base_configure_debug_command { iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE } -function _neutron_ovs_base_configure_firewall_driver() { +function _neutron_ovs_base_configure_firewall_driver { if [[ "$Q_USE_SECGROUP" == "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver else @@ -62,7 +62,7 @@ function _neutron_ovs_base_configure_firewall_driver() { fi } -function _neutron_ovs_base_configure_l3_agent() { +function _neutron_ovs_base_configure_l3_agent { iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE neutron-ovs-cleanup @@ -72,7 +72,7 @@ function _neutron_ovs_base_configure_l3_agent() { sudo ip addr flush dev $PUBLIC_BRIDGE } -function _neutron_ovs_base_configure_nova_vif_driver() { +function _neutron_ovs_base_configure_nova_vif_driver { : } diff --git a/lib/neutron_plugins/plumgrid 
b/lib/neutron_plugins/plumgrid index bccd301011..19f94cb78c 100644 --- a/lib/neutron_plugins/plumgrid +++ b/lib/neutron_plugins/plumgrid @@ -6,15 +6,15 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { : } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { : } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/plumgrid Q_PLUGIN_CONF_FILENAME=plumgrid.ini Q_DB_NAME="plumgrid_neutron" @@ -26,7 +26,7 @@ function neutron_plugin_configure_common() { PLUMGRID_TIMEOUT=${PLUMGRID_TIMEOUT:-70} } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server $PLUMGRID_DIRECTOR_IP iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server_port $PLUMGRID_DIRECTOR_PORT iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector username $PLUMGRID_ADMIN @@ -34,21 +34,21 @@ function neutron_plugin_configure_service() { iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector servertimeout $PLUMGRID_TIMEOUT } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { : } -function is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { # False return 1 } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # False return 1 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/ryu b/lib/neutron_plugins/ryu index 334c227cdb..9ae36d38fa 100644 --- a/lib/neutron_plugins/ryu +++ b/lib/neutron_plugins/ryu @@ -8,12 +8,12 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base source $TOP_DIR/lib/neutron_thirdparty/ryu # for 
configuration value -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { _neutron_ovs_base_configure_nova_vif_driver iniset $NOVA_CONF DEFAULT libvirt_ovs_integration_bridge "$OVS_BRIDGE" } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { _neutron_ovs_base_install_agent_packages # neutron_ryu_agent requires ryu module @@ -22,28 +22,28 @@ function neutron_plugin_install_agent_packages() { configure_ryu } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ryu Q_PLUGIN_CONF_FILENAME=ryu.ini Q_DB_NAME="ovs_neutron" Q_PLUGIN_CLASS="neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2" } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { _neutron_ovs_base_configure_debug_command iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT _neutron_ovs_base_configure_l3_agent } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { # Set up integration bridge _neutron_ovs_base_setup_bridge $OVS_BRIDGE if [ -n "$RYU_INTERNAL_INTERFACE" ]; then @@ -55,24 +55,24 @@ function neutron_plugin_configure_plugin_agent() { _neutron_ovs_base_configure_firewall_driver } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { iniset /$Q_PLUGIN_CONF_FILE ovs openflow_rest_api $RYU_API_HOST:$RYU_API_PORT _neutron_ovs_base_configure_firewall_driver } -function neutron_plugin_setup_interface_driver() { +function 
neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver iniset $conf_file DEFAULT ovs_use_veth True } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 0 means True here return 0 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall index 8273e54e6c..ab6c32426a 100644 --- a/lib/neutron_plugins/services/firewall +++ b/lib/neutron_plugins/services/firewall @@ -7,11 +7,11 @@ set +o xtrace FWAAS_PLUGIN=neutron.services.firewall.fwaas_plugin.FirewallPlugin -function neutron_fwaas_configure_common() { +function neutron_fwaas_configure_common { _neutron_service_plugin_class_add $FWAAS_PLUGIN } -function neutron_fwaas_configure_driver() { +function neutron_fwaas_configure_driver { FWAAS_DRIVER_CONF_FILENAME=/etc/neutron/fwaas_driver.ini cp $NEUTRON_DIR/etc/fwaas_driver.ini $FWAAS_DRIVER_CONF_FILENAME @@ -19,7 +19,7 @@ function neutron_fwaas_configure_driver() { iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas driver "neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver" } -function neutron_fwaas_stop() { +function neutron_fwaas_stop { : } diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer index 5d7a94e5d8..744826e49d 100644 --- a/lib/neutron_plugins/services/loadbalancer +++ b/lib/neutron_plugins/services/loadbalancer @@ -9,7 +9,7 @@ set +o xtrace AGENT_LBAAS_BINARY="$NEUTRON_BIN_DIR/neutron-lbaas-agent" LBAAS_PLUGIN=neutron.services.loadbalancer.plugin.LoadBalancerPlugin -function neutron_agent_lbaas_install_agent_packages() { +function neutron_agent_lbaas_install_agent_packages { if is_ubuntu || is_fedora; then install_package haproxy elif 
is_suse; then @@ -18,11 +18,11 @@ function neutron_agent_lbaas_install_agent_packages() { fi } -function neutron_agent_lbaas_configure_common() { +function neutron_agent_lbaas_configure_common { _neutron_service_plugin_class_add $LBAAS_PLUGIN } -function neutron_agent_lbaas_configure_agent() { +function neutron_agent_lbaas_configure_agent { LBAAS_AGENT_CONF_PATH=/etc/neutron/services/loadbalancer/haproxy mkdir -p $LBAAS_AGENT_CONF_PATH @@ -41,7 +41,7 @@ function neutron_agent_lbaas_configure_agent() { fi } -function neutron_lbaas_stop() { +function neutron_lbaas_stop { pids=$(ps aux | awk '/haproxy/ { print $2 }') [ ! -z "$pids" ] && sudo kill $pids } diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering index 37952bbabd..0e5f75b27b 100644 --- a/lib/neutron_plugins/services/metering +++ b/lib/neutron_plugins/services/metering @@ -9,11 +9,11 @@ set +o xtrace AGENT_METERING_BINARY="$NEUTRON_BIN_DIR/neutron-metering-agent" METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin" -function neutron_agent_metering_configure_common() { +function neutron_agent_metering_configure_common { _neutron_service_plugin_class_add $METERING_PLUGIN } -function neutron_agent_metering_configure_agent() { +function neutron_agent_metering_configure_agent { METERING_AGENT_CONF_PATH=/etc/neutron/services/metering mkdir -p $METERING_AGENT_CONF_PATH @@ -22,7 +22,7 @@ function neutron_agent_metering_configure_agent() { cp $NEUTRON_DIR/etc/metering_agent.ini $METERING_AGENT_CONF_FILENAME } -function neutron_metering_stop() { +function neutron_metering_stop { : } diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn index 02370e7f85..e56d3613c2 100644 --- a/lib/neutron_plugins/services/vpn +++ b/lib/neutron_plugins/services/vpn @@ -10,15 +10,15 @@ AGENT_VPN_BINARY="$NEUTRON_BIN_DIR/neutron-vpn-agent" VPN_PLUGIN="neutron.services.vpn.plugin.VPNDriverPlugin" IPSEC_PACKAGE=${IPSEC_PACKAGE:-"openswan"} -function 
neutron_vpn_install_agent_packages() { +function neutron_vpn_install_agent_packages { install_package $IPSEC_PACKAGE } -function neutron_vpn_configure_common() { +function neutron_vpn_configure_common { _neutron_service_plugin_class_add $VPN_PLUGIN } -function neutron_vpn_stop() { +function neutron_vpn_stop { local ipsec_data_dir=$DATA_DIR/neutron/ipsec local pids if [ -d $ipsec_data_dir ]; then diff --git a/lib/neutron_plugins/vmware_nsx b/lib/neutron_plugins/vmware_nsx index d506cb6f8d..0930422e4e 100644 --- a/lib/neutron_plugins/vmware_nsx +++ b/lib/neutron_plugins/vmware_nsx @@ -7,7 +7,7 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base -function setup_integration_bridge() { +function setup_integration_bridge { _neutron_ovs_base_setup_bridge $OVS_BRIDGE # Set manager to NSX controller (1st of list) if [[ "$NSX_CONTROLLERS" != "" ]]; then @@ -20,24 +20,24 @@ function setup_integration_bridge() { sudo ovs-vsctl set-manager ssl:$OVS_MGR_IP } -function is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { # NSX uses OVS, but not the l3-agent return 0 } -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { # if n-cpu is enabled, then setup integration bridge if is_service_enabled n-cpu; then setup_integration_bridge fi } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { # VMware NSX Plugin does not run q-agt, but it currently needs dhcp and metadata agents _neutron_ovs_base_install_agent_packages } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware Q_PLUGIN_CONF_FILENAME=nsx.ini Q_DB_NAME="neutron_nsx" @@ -45,29 +45,29 @@ function neutron_plugin_configure_common() { Q_PLUGIN_CLASS="neutron.plugins.nicira.NeutronPlugin.NvpPluginV2" } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { sudo ovs-vsctl --no-wait -- 
--may-exist add-br $PUBLIC_BRIDGE iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge "$PUBLIC_BRIDGE" } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { setup_integration_bridge iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network True iniset $Q_DHCP_CONF_FILE DEFAULT ovs_use_veth True } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { # VMware NSX plugin does not run L3 agent die $LINENO "q-l3 should must not be executed with VMware NSX plugin!" } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { # VMware NSX plugin does not run L2 agent die $LINENO "q-agt must not be executed with VMware NSX plugin!" } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { if [[ "$MAX_LP_PER_BRIDGED_LS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS fi @@ -132,17 +132,17 @@ function neutron_plugin_configure_service() { fi } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 0 means True here return 0 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight index 24c10443b7..f03de56295 100644 --- a/lib/neutron_thirdparty/bigswitch_floodlight +++ b/lib/neutron_thirdparty/bigswitch_floodlight @@ -8,11 +8,11 @@ set +o xtrace BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} BS_FL_OF_PORT=${BS_FL_OF_PORT:-6633} 
-function configure_bigswitch_floodlight() { +function configure_bigswitch_floodlight { : } -function init_bigswitch_floodlight() { +function init_bigswitch_floodlight { install_neutron_agent_packages echo -n "Installing OVS managed by the openflow controllers:" @@ -32,19 +32,19 @@ function init_bigswitch_floodlight() { sudo ovs-vsctl --no-wait set-controller ${OVS_BRIDGE} ${ctrls} } -function install_bigswitch_floodlight() { +function install_bigswitch_floodlight { : } -function start_bigswitch_floodlight() { +function start_bigswitch_floodlight { : } -function stop_bigswitch_floodlight() { +function stop_bigswitch_floodlight { : } -function check_bigswitch_floodlight() { +function check_bigswitch_floodlight { : } diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet index 98be4254fc..ad417bbc29 100644 --- a/lib/neutron_thirdparty/midonet +++ b/lib/neutron_thirdparty/midonet @@ -20,28 +20,28 @@ MIDONET_CLIENT_DIR=${MIDONET_CLIENT_DIR:-$MIDONET_DIR/python-midonetclient} MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -function configure_midonet() { +function configure_midonet { : } -function init_midonet() { +function init_midonet { : } -function install_midonet() { +function install_midonet { git_clone $MIDONET_CLIENT_REPO $MIDONET_CLIENT_DIR $MIDONET_CLIENT_BRANCH export PYTHONPATH=$MIDONET_CLIENT_DIR/src:$PYTHONPATH } -function start_midonet() { +function start_midonet { : } -function stop_midonet() { +function stop_midonet { : } -function check_midonet() { +function check_midonet { : } diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu index 5edf273361..424a90041e 100644 --- a/lib/neutron_thirdparty/ryu +++ b/lib/neutron_thirdparty/ryu @@ -21,14 +21,14 @@ RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest} # configure_ryu can be called multiple times as neutron_pluing/ryu may call # this function for neutron-ryu-agent _RYU_CONFIGURED=${_RYU_CONFIGURED:-False} -function configure_ryu() { +function configure_ryu { 
if [[ "$_RYU_CONFIGURED" == "False" ]]; then setup_develop $RYU_DIR _RYU_CONFIGURED=True fi } -function init_ryu() { +function init_ryu { RYU_CONF_DIR=/etc/ryu if [[ ! -d $RYU_CONF_DIR ]]; then sudo mkdir -p $RYU_CONF_DIR @@ -60,22 +60,22 @@ neutron_controller_addr=tcp:$RYU_OFP_HOST:$RYU_OFP_PORT # Make this function idempotent and avoid cloning same repo many times # with RECLONE=yes _RYU_INSTALLED=${_RYU_INSTALLED:-False} -function install_ryu() { +function install_ryu { if [[ "$_RYU_INSTALLED" == "False" ]]; then git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH _RYU_INSTALLED=True fi } -function start_ryu() { +function start_ryu { screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --config-file $RYU_CONF" } -function stop_ryu() { +function stop_ryu { : } -function check_ryu() { +function check_ryu { : } diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema index 2b125646dc..d465ac753e 100644 --- a/lib/neutron_thirdparty/trema +++ b/lib/neutron_thirdparty/trema @@ -31,7 +31,7 @@ TREMA_SS_CONFIG=$TREMA_SS_ETC_DIR/sliceable.conf TREMA_SS_APACHE_CONFIG=/etc/apache2/sites-available/sliceable_switch.conf # configure_trema - Set config files, create data dirs, etc -function configure_trema() { +function configure_trema { # prepare dir for d in $TREMA_SS_ETC_DIR $TREMA_SS_DB_DIR $TREMA_SS_SCRIPT_DIR; do sudo mkdir -p $d @@ -41,7 +41,7 @@ function configure_trema() { } # init_trema - Initialize databases, etc. 
-function init_trema() { +function init_trema { local _pwd=$(pwd) # Initialize databases for Sliceable Switch @@ -70,7 +70,7 @@ function init_trema() { $TREMA_SS_CONFIG } -function gem_install() { +function gem_install { [[ "$OFFLINE" = "True" ]] && return [ -n "$RUBYGEMS_CMD" ] || get_gem_command @@ -79,7 +79,7 @@ function gem_install() { sudo $RUBYGEMS_CMD install $pkg } -function get_gem_command() { +function get_gem_command { # Trema requires ruby 1.8, so gem1.8 is checked first RUBYGEMS_CMD=$(which gem1.8 || which gem) if [ -z "$RUBYGEMS_CMD" ]; then @@ -87,7 +87,7 @@ function get_gem_command() { fi } -function install_trema() { +function install_trema { # Trema gem_install trema # Sliceable Switch @@ -97,7 +97,7 @@ function install_trema() { make -C $TREMA_DIR/apps/sliceable_switch } -function start_trema() { +function start_trema { # APACHE_NAME is defined in init_horizon (in lib/horizon) restart_service $APACHE_NAME @@ -105,11 +105,11 @@ function start_trema() { trema run -d -c $TREMA_SS_CONFIG } -function stop_trema() { +function stop_trema { sudo TREMA_TMP=$TREMA_TMP_DIR trema killall } -function check_trema() { +function check_trema { : } diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx index 4eb177a458..3fecc62560 100644 --- a/lib/neutron_thirdparty/vmware_nsx +++ b/lib/neutron_thirdparty/vmware_nsx @@ -22,11 +22,11 @@ NSX_GATEWAY_NETWORK_INTERFACE=${NSX_GATEWAY_NETWORK_INTERFACE:-eth2} # is invoked by unstack.sh FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} -function configure_vmware_nsx() { +function configure_vmware_nsx { : } -function init_vmware_nsx() { +function init_vmware_nsx { if ! is_set NSX_GATEWAY_NETWORK_CIDR; then NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address to set on br-ex was not specified. 
" @@ -52,15 +52,15 @@ function init_vmware_nsx() { sudo ip addr add dev $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_CIDR } -function install_vmware_nsx() { +function install_vmware_nsx { : } -function start_vmware_nsx() { +function start_vmware_nsx { : } -function stop_vmware_nsx() { +function stop_vmware_nsx { if ! is_set NSX_GATEWAY_NETWORK_CIDR; then NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address expected on br-ex was not specified. " @@ -78,7 +78,7 @@ function stop_vmware_nsx() { done } -function check_vmware_nsx() { +function check_vmware_nsx { neutron-check-nsx-config $NEUTRON_CONF_DIR/plugins/vmware/nsx.ini } diff --git a/lib/nova b/lib/nova index fefeda1236..90b1ba4fde 100644 --- a/lib/nova +++ b/lib/nova @@ -144,7 +144,7 @@ function is_n-cell_enabled { } # Helper to clean iptables rules -function clean_iptables() { +function clean_iptables { # Delete rules sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables",$0}' | bash # Delete nat rules @@ -157,7 +157,7 @@ function clean_iptables() { # cleanup_nova() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_nova() { +function cleanup_nova { if is_service_enabled n-cpu; then # Clean iptables from previous runs clean_iptables @@ -191,7 +191,7 @@ function cleanup_nova() { } # configure_nova_rootwrap() - configure Nova's rootwrap -function configure_nova_rootwrap() { +function configure_nova_rootwrap { # Deploy new rootwrap filters files (owned by root). # Wipe any existing rootwrap.d files first if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then @@ -219,7 +219,7 @@ function configure_nova_rootwrap() { } # configure_nova() - Set config files, create data dirs, etc -function configure_nova() { +function configure_nova { # Put config files in ``/etc/nova`` for everyone to find if [[ ! 
-d $NOVA_CONF_DIR ]]; then sudo mkdir -p $NOVA_CONF_DIR @@ -367,7 +367,7 @@ create_nova_accounts() { } # create_nova_conf() - Create a new nova.conf file -function create_nova_conf() { +function create_nova_conf { # Remove legacy ``nova.conf`` rm -f $NOVA_DIR/bin/nova.conf @@ -515,7 +515,7 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" } -function init_nova_cells() { +function init_nova_cells { if is_service_enabled n-cell; then cp $NOVA_CONF $NOVA_CELLS_CONF iniset $NOVA_CELLS_CONF DEFAULT sql_connection `database_connection_url $NOVA_CELLS_DB` @@ -542,14 +542,14 @@ function init_nova_cells() { } # create_nova_cache_dir() - Part of the init_nova() process -function create_nova_cache_dir() { +function create_nova_cache_dir { # Create cache dir sudo mkdir -p $NOVA_AUTH_CACHE_DIR sudo chown $STACK_USER $NOVA_AUTH_CACHE_DIR rm -f $NOVA_AUTH_CACHE_DIR/* } -function create_nova_conf_nova_network() { +function create_nova_conf_nova_network { iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER" iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE" iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE" @@ -560,14 +560,14 @@ function create_nova_conf_nova_network() { } # create_nova_keys_dir() - Part of the init_nova() process -function create_nova_keys_dir() { +function create_nova_keys_dir { # Create keys dir sudo mkdir -p ${NOVA_STATE_PATH}/keys sudo chown -R $STACK_USER ${NOVA_STATE_PATH} } # init_nova() - Initialize databases, etc. -function init_nova() { +function init_nova { # All nova components talk to a central database. # Only do this step once on the API node for an entire cluster. 
if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then @@ -596,14 +596,14 @@ function init_nova() { } # install_novaclient() - Collect source and prepare -function install_novaclient() { +function install_novaclient { git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH setup_develop $NOVACLIENT_DIR sudo install -D -m 0644 -o $STACK_USER {$NOVACLIENT_DIR/tools/,/etc/bash_completion.d/}nova.bash_completion } # install_nova() - Collect source and prepare -function install_nova() { +function install_nova { if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then install_nova_hypervisor fi @@ -638,7 +638,7 @@ function install_nova() { } # start_nova_api() - Start the API process ahead of other things -function start_nova_api() { +function start_nova_api { # Get right service port for testing local service_port=$NOVA_SERVICE_PORT if is_service_enabled tls-proxy; then @@ -658,7 +658,7 @@ function start_nova_api() { } # start_nova_compute() - Start the compute process -function start_nova_compute() { +function start_nova_compute { if is_service_enabled n-cell; then local compute_cell_conf=$NOVA_CELLS_CONF else @@ -693,7 +693,7 @@ function start_nova_compute() { } # start_nova() - Start running processes, including screen -function start_nova_rest() { +function start_nova_rest { local api_cell_conf=$NOVA_CONF if is_service_enabled n-cell; then local compute_cell_conf=$NOVA_CELLS_CONF @@ -722,13 +722,13 @@ function start_nova_rest() { screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore --config-file $api_cell_conf" } -function start_nova() { +function start_nova { start_nova_compute start_nova_rest } # stop_nova() - Stop running processes (non-screen) -function stop_nova() { +function stop_nova { # Kill the nova screen windows # Some services are listed here twice since more than one instance # of a service may be running in certain configs. 
diff --git a/lib/nova_plugins/hypervisor-baremetal b/lib/nova_plugins/hypervisor-baremetal index 660c977bde..2da1097027 100644 --- a/lib/nova_plugins/hypervisor-baremetal +++ b/lib/nova_plugins/hypervisor-baremetal @@ -33,13 +33,13 @@ STUB_NETWORK=${STUB_NETWORK:-False} # ------------ # clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor() { +function cleanup_nova_hypervisor { # This function intentionally left blank : } # configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor() { +function configure_nova_hypervisor { configure_baremetal_nova_dirs iniset $NOVA_CONF baremetal sql_connection `database_connection_url nova_bm` @@ -67,19 +67,19 @@ function configure_nova_hypervisor() { } # install_nova_hypervisor() - Install external components -function install_nova_hypervisor() { +function install_nova_hypervisor { # This function intentionally left blank : } # start_nova_hypervisor - Start any required external services -function start_nova_hypervisor() { +function start_nova_hypervisor { # This function intentionally left blank : } # stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor() { +function stop_nova_hypervisor { # This function intentionally left blank : } diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index b5df19db02..f8dc6afa19 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -44,7 +44,7 @@ DOCKER_APT_REPO=${DOCKER_APT_REPO:-https://get.docker.io/ubuntu} # ------------ # clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor() { +function cleanup_nova_hypervisor { stop_service docker # Clean out work area @@ -52,13 +52,13 @@ function cleanup_nova_hypervisor() { } # configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor() { +function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT 
compute_driver docker.DockerDriver iniset $GLANCE_API_CONF DEFAULT container_formats ami,ari,aki,bare,ovf,docker } # install_nova_hypervisor() - Install external components -function install_nova_hypervisor() { +function install_nova_hypervisor { # So far this is Ubuntu only if ! is_ubuntu; then die $LINENO "Docker is only supported on Ubuntu at this time" @@ -77,7 +77,7 @@ function install_nova_hypervisor() { } # start_nova_hypervisor - Start any required external services -function start_nova_hypervisor() { +function start_nova_hypervisor { local docker_pid read docker_pid <$DOCKER_PID_FILE if [[ -z $docker_pid ]] || ! ps -p $docker_pid | grep [d]ocker; then @@ -111,7 +111,7 @@ function start_nova_hypervisor() { } # stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor() { +function stop_nova_hypervisor { # Stop the docker registry container docker kill $(docker ps | grep docker-registry | cut -d' ' -f1) } diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake index fe0d1900ee..e7a833f806 100644 --- a/lib/nova_plugins/hypervisor-fake +++ b/lib/nova_plugins/hypervisor-fake @@ -27,13 +27,13 @@ set +o xtrace # ------------ # clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor() { +function cleanup_nova_hypervisor { # This function intentionally left blank : } # configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor() { +function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT compute_driver "nova.virt.fake.FakeDriver" # Disable arbitrary limits iniset $NOVA_CONF DEFAULT quota_instances -1 @@ -51,19 +51,19 @@ function configure_nova_hypervisor() { } # install_nova_hypervisor() - Install external components -function install_nova_hypervisor() { +function install_nova_hypervisor { # This function intentionally left blank : } # start_nova_hypervisor - Start any required external services -function start_nova_hypervisor() { +function 
start_nova_hypervisor { # This function intentionally left blank : } # stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor() { +function stop_nova_hypervisor { # This function intentionally left blank : } diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index a550600363..b39c57c74a 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -31,13 +31,13 @@ ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False} # ------------ # clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor() { +function cleanup_nova_hypervisor { # This function intentionally left blank : } # configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor() { +function configure_nova_hypervisor { if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces cat </dev/null; then echo "Found old oslo.config... removing to ensure consistency" diff --git a/lib/rpc_backend b/lib/rpc_backend index 34f576f5b8..a0424b1dee 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -25,7 +25,7 @@ set +o xtrace # Make sure we only have one rpc backend enabled. # Also check the specified rpc backend is available on your platform. -function check_rpc_backend() { +function check_rpc_backend { local rpc_needed=1 # We rely on the fact that filenames in lib/* match the service names # that can be passed as arguments to is_service_enabled. 
@@ -91,7 +91,7 @@ function cleanup_rpc_backend { } # install rpc backend -function install_rpc_backend() { +function install_rpc_backend { if is_service_enabled rabbit; then # Install rabbitmq-server # the temp file is necessary due to LP: #878600 @@ -135,7 +135,7 @@ function install_rpc_backend() { } # restart the rpc backend -function restart_rpc_backend() { +function restart_rpc_backend { if is_service_enabled rabbit; then # Start rabbitmq-server echo_summary "Starting RabbitMQ" @@ -165,7 +165,7 @@ function restart_rpc_backend() { } # iniset cofiguration -function iniset_rpc_backend() { +function iniset_rpc_backend { local package=$1 local file=$2 local section=$3 @@ -193,7 +193,7 @@ function iniset_rpc_backend() { # Check if qpid can be used on the current distro. # qpid_is_supported -function qpid_is_supported() { +function qpid_is_supported { if [[ -z "$DISTRO" ]]; then GetDistro fi diff --git a/lib/savanna b/lib/savanna index 954f0e711e..d7152b1e6f 100644 --- a/lib/savanna +++ b/lib/savanna @@ -55,7 +55,7 @@ TEMPEST_SERVICES+=,savanna # Tenant User Roles # ------------------------------ # service savanna admin -function create_savanna_accounts() { +function create_savanna_accounts { SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") @@ -88,14 +88,14 @@ function create_savanna_accounts() { # cleanup_savanna() - Remove residual data files, anything left over from # previous runs that would need to clean up. -function cleanup_savanna() { +function cleanup_savanna { # Cleanup auth cache dir sudo rm -rf $SAVANNA_AUTH_CACHE_DIR } # configure_savanna() - Set config files, create data dirs, etc -function configure_savanna() { +function configure_savanna { if [[ ! 
-d $SAVANNA_CONF_DIR ]]; then sudo mkdir -p $SAVANNA_CONF_DIR @@ -142,18 +142,18 @@ function configure_savanna() { } # install_savanna() - Collect source and prepare -function install_savanna() { +function install_savanna { git_clone $SAVANNA_REPO $SAVANNA_DIR $SAVANNA_BRANCH setup_develop $SAVANNA_DIR } # start_savanna() - Start running processes, including screen -function start_savanna() { +function start_savanna { screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api --config-file $SAVANNA_CONF_FILE" } # stop_savanna() - Stop running processes -function stop_savanna() { +function stop_savanna { # Kill the Savanna screen windows screen -S $SCREEN_NAME -p savanna -X kill } diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard index 691b23f6e8..6fe15a3c81 100644 --- a/lib/savanna-dashboard +++ b/lib/savanna-dashboard @@ -35,7 +35,7 @@ SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient # Functions # --------- -function configure_savanna_dashboard() { +function configure_savanna_dashboard { echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)" >> $HORIZON_DIR/openstack_dashboard/settings.py @@ -47,19 +47,19 @@ function configure_savanna_dashboard() { } # install_savanna_dashboard() - Collect source and prepare -function install_savanna_dashboard() { +function install_savanna_dashboard { install_python_savannaclient git_clone $SAVANNA_DASHBOARD_REPO $SAVANNA_DASHBOARD_DIR $SAVANNA_DASHBOARD_BRANCH setup_develop $SAVANNA_DASHBOARD_DIR } -function install_python_savannaclient() { +function install_python_savannaclient { git_clone $SAVANNA_PYTHONCLIENT_REPO $SAVANNA_PYTHONCLIENT_DIR $SAVANNA_PYTHONCLIENT_BRANCH setup_develop $SAVANNA_PYTHONCLIENT_DIR } # Cleanup file settings.py from Savanna -function cleanup_savanna_dashboard() { +function cleanup_savanna_dashboard { sed -i '/savanna/d' $HORIZON_DIR/openstack_dashboard/settings.py } diff --git 
a/lib/stackforge b/lib/stackforge index 5fa4570b74..dca08cc2c2 100644 --- a/lib/stackforge +++ b/lib/stackforge @@ -34,7 +34,7 @@ PECAN_DIR=$DEST/pecan # ------------ # install_stackforge() - Collect source and prepare -function install_stackforge() { +function install_stackforge { # TODO(sdague): remove this once we get to Icehouse, this just makes # for a smoother transition of existing users. cleanup_stackforge @@ -47,7 +47,7 @@ function install_stackforge() { } # cleanup_stackforge() - purge possibly old versions of stackforge libraries -function cleanup_stackforge() { +function cleanup_stackforge { # this means we've got an old version installed, lets get rid of it # otherwise python hates itself for lib in wsme pecan; do diff --git a/lib/swift b/lib/swift index 6c33af5082..59c1e54d8a 100644 --- a/lib/swift +++ b/lib/swift @@ -126,7 +126,7 @@ function is_swift_enabled { } # cleanup_swift() - Remove residual data files -function cleanup_swift() { +function cleanup_swift { rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 @@ -141,7 +141,7 @@ function cleanup_swift() { } # _cleanup_swift_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file -function _cleanup_swift_apache_wsgi() { +function _cleanup_swift_apache_wsgi { sudo rm -f $SWIFT_APACHE_WSGI_DIR/*.wsgi disable_apache_site proxy-server for node_number in ${SWIFT_REPLICAS_SEQ}; do @@ -154,7 +154,7 @@ function _cleanup_swift_apache_wsgi() { } # _config_swift_apache_wsgi() - Set WSGI config files of Swift -function _config_swift_apache_wsgi() { +function _config_swift_apache_wsgi { sudo mkdir -p ${SWIFT_APACHE_WSGI_DIR} local apache_vhost_dir=/etc/${APACHE_NAME}/$APACHE_CONF_DIR local proxy_port=${SWIFT_DEFAULT_BIND_PORT:-8080} @@ -233,7 +233,7 @@ function _config_swift_apache_wsgi() { # This function generates an object/container/account configuration # 
emulating 4 nodes on different ports -function generate_swift_config() { +function generate_swift_config { local swift_node_config=$1 local node_id=$2 local bind_port=$3 @@ -272,7 +272,7 @@ function generate_swift_config() { # configure_swift() - Set config files, create data dirs and loop image -function configure_swift() { +function configure_swift { local swift_pipeline="${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH}" local node_number local swift_node_config @@ -460,7 +460,7 @@ EOF } # create_swift_disk - Create Swift backing disk -function create_swift_disk() { +function create_swift_disk { local node_number # First do a bit of setup by creating the directories and @@ -520,7 +520,7 @@ function create_swift_disk() { # swifttenanttest1 swiftusertest3 anotherrole # swifttenanttest2 swiftusertest2 admin -function create_swift_accounts() { +function create_swift_accounts { # Defines specific passwords used by tools/create_userrc.sh SWIFTUSERTEST1_PASSWORD=testing SWIFTUSERTEST2_PASSWORD=testing2 @@ -578,7 +578,7 @@ function create_swift_accounts() { } # init_swift() - Initialize rings -function init_swift() { +function init_swift { local node_number # Make sure to kill all swift processes first swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true @@ -612,7 +612,7 @@ function init_swift() { rm -f $SWIFT_AUTH_CACHE_DIR/* } -function install_swift() { +function install_swift { git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH setup_develop $SWIFT_DIR if is_apache_enabled_service swift; then @@ -620,13 +620,13 @@ function install_swift() { fi } -function install_swiftclient() { +function install_swiftclient { git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH setup_develop $SWIFTCLIENT_DIR } # start_swift() - Start running processes, including screen -function start_swift() { +function start_swift { # (re)start rsyslog restart_service rsyslog # (re)start memcached to make sure we have a clean memcache. 
@@ -674,7 +674,7 @@ function start_swift() { } # stop_swift() - Stop running processes (non-screen) -function stop_swift() { +function stop_swift { if is_apache_enabled_service swift; then swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0 diff --git a/lib/tempest b/lib/tempest index 410c80c46d..16f8744d85 100644 --- a/lib/tempest +++ b/lib/tempest @@ -70,7 +70,7 @@ IPV6_ENABLED=$(trueorfalse True $IPV6_ENABLED) # --------- # configure_tempest() - Set config files, create data dirs, etc -function configure_tempest() { +function configure_tempest { setup_develop $TEMPEST_DIR local image_lines local images @@ -359,12 +359,12 @@ function configure_tempest() { } # install_tempest() - Collect source and prepare -function install_tempest() { +function install_tempest { git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH } # init_tempest() - Initialize ec2 images -function init_tempest() { +function init_tempest { local base_image_name=cirros-0.3.1-x86_64 # /opt/stack/devstack/files/images/cirros-0.3.1-x86_64-uec local image_dir="$FILES/images/${base_image_name}-uec" diff --git a/lib/template b/lib/template index b8e7c4d86f..efe5826f15 100644 --- a/lib/template +++ b/lib/template @@ -45,7 +45,7 @@ function is_XXXX_enabled { # cleanup_XXXX() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_XXXX() { +function cleanup_XXXX { # kill instances (nova) # delete image files (glance) # This function intentionally left blank @@ -53,7 +53,7 @@ function cleanup_XXXX() { } # configure_XXXX() - Set config files, create data dirs, etc -function configure_XXXX() { +function configure_XXXX { # sudo python setup.py deploy # iniset $XXXX_CONF ... # This function intentionally left blank @@ -61,26 +61,26 @@ function configure_XXXX() { } # init_XXXX() - Initialize databases, etc. 
-function init_XXXX() { +function init_XXXX { # clean up from previous (possibly aborted) runs # create required data files : } # install_XXXX() - Collect source and prepare -function install_XXXX() { +function install_XXXX { # git clone xxx : } # start_XXXX() - Start running processes, including screen -function start_XXXX() { +function start_XXXX { # screen_it XXXX "cd $XXXX_DIR && $XXXX_DIR/bin/XXXX-bin" : } # stop_XXXX() - Stop running processes (non-screen) -function stop_XXXX() { +function stop_XXXX { # FIXME(dtroyer): stop only our screen screen window? : } diff --git a/lib/tls b/lib/tls index 6134fa1bad..072059d599 100644 --- a/lib/tls +++ b/lib/tls @@ -61,7 +61,7 @@ STUD_CIPHERS='TLSv1+HIGH:!DES:!aNULL:!eNULL:@STRENGTH' OPENSSL=${OPENSSL:-/usr/bin/openssl} # Do primary CA configuration -function configure_CA() { +function configure_CA { # build common config file # Verify ``TLS_IP`` is good @@ -73,7 +73,7 @@ function configure_CA() { # Creates a new CA directory structure # create_CA_base ca-dir -function create_CA_base() { +function create_CA_base { local ca_dir=$1 if [[ -d $ca_dir ]]; then @@ -92,7 +92,7 @@ function create_CA_base() { # Create a new CA configuration file # create_CA_config ca-dir common-name -function create_CA_config() { +function create_CA_config { local ca_dir=$1 local common_name=$2 @@ -145,7 +145,7 @@ keyUsage = cRLSign, keyCertSign # Create a new signing configuration file # create_signing_config ca-dir -function create_signing_config() { +function create_signing_config { local ca_dir=$1 echo " @@ -225,7 +225,7 @@ function init_cert { # make_cert creates and signs a new certificate with the given commonName and CA # make_cert ca-dir cert-name "common-name" ["alt-name" ...] 
-function make_cert() { +function make_cert { local ca_dir=$1 local cert_name=$2 local common_name=$3 @@ -261,7 +261,7 @@ function make_cert() { # Make an intermediate CA to sign everything else # make_int_CA ca-dir signing-ca-dir -function make_int_CA() { +function make_int_CA { local ca_dir=$1 local signing_ca_dir=$2 @@ -291,7 +291,7 @@ function make_int_CA() { # Make a root CA to sign other CAs # make_root_CA ca-dir -function make_root_CA() { +function make_root_CA { local ca_dir=$1 # Create the root CA @@ -319,7 +319,7 @@ function make_root_CA() { # is a short-circuit boolean, i.e it returns on the first match. # # Uses global ``SSL_ENABLED_SERVICES`` -function is_ssl_enabled_service() { +function is_ssl_enabled_service { services=$@ for service in ${services}; do [[ ,${SSL_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 @@ -337,7 +337,7 @@ function is_ssl_enabled_service() { # example for keystone this would be KEYSTONE_SSL_CERT, KEYSTONE_SSL_KEY and # KEYSTONE_SSL_CA. If it does not find these certificates the program will # quit. 
-function ensure_certificates() { +function ensure_certificates { local service=$1 local cert_var="${service}_SSL_CERT" @@ -362,7 +362,7 @@ function ensure_certificates() { # Starts the TLS proxy for the given IP/ports # start_tls_proxy front-host front-port back-host back-port -function start_tls_proxy() { +function start_tls_proxy { local f_host=$1 local f_port=$2 local b_host=$3 diff --git a/lib/trove b/lib/trove index 6834149c64..75b990f91e 100644 --- a/lib/trove +++ b/lib/trove @@ -53,7 +53,7 @@ function is_trove_enabled { } # setup_trove_logging() - Adds logging configuration to conf files -function setup_trove_logging() { +function setup_trove_logging { local CONF=$1 iniset $CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $CONF DEFAULT use_syslog $SYSLOG @@ -69,7 +69,7 @@ function setup_trove_logging() { # ------------------------------------------------------------------ # service trove admin # if enabled -create_trove_accounts() { +function create_trove_accounts { # Trove SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") SERVICE_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") @@ -106,19 +106,19 @@ create_trove_accounts() { # cleanup_trove() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_trove() { +function cleanup_trove { #Clean up dirs rm -fr $TROVE_AUTH_CACHE_DIR/* rm -fr $TROVE_CONF_DIR/* } # configure_troveclient() - Set config files, create data dirs, etc -function configure_troveclient() { +function configure_troveclient { setup_develop $TROVECLIENT_DIR } # configure_trove() - Set config files, create data dirs, etc -function configure_trove() { +function configure_trove { setup_develop $TROVE_DIR # Create the trove conf dir and cache dirs if they don't exist @@ -182,17 +182,17 @@ function configure_trove() { } # install_troveclient() - Collect source and prepare -function install_troveclient() { +function 
install_troveclient { git_clone $TROVECLIENT_REPO $TROVECLIENT_DIR $TROVECLIENT_BRANCH } # install_trove() - Collect source and prepare -function install_trove() { +function install_trove { git_clone $TROVE_REPO $TROVE_DIR $TROVE_BRANCH } # init_trove() - Initializes Trove Database as a Service -function init_trove() { +function init_trove { #(Re)Create trove db recreate_database trove utf8 @@ -201,14 +201,14 @@ function init_trove() { } # start_trove() - Start running processes, including screen -function start_trove() { +function start_trove { screen_it tr-api "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1" screen_it tr-tmgr "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1" screen_it tr-cond "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug 2>&1" } # stop_trove() - Stop running processes -function stop_trove() { +function stop_trove { # Kill the trove screen windows for serv in tr-api tr-tmgr tr-cond; do screen_stop $serv diff --git a/stack.sh b/stack.sh index ac89e52515..a70267984c 100755 --- a/stack.sh +++ b/stack.sh @@ -464,7 +464,7 @@ fi # ----------------- # Draw a spinner so the user knows something is happening -function spinner() { +function spinner { local delay=0.75 local spinstr='/-\|' printf "..." >&3 @@ -479,7 +479,7 @@ function spinner() { # Echo text to the log file, summary log file and stdout # echo_summary "something to say" -function echo_summary() { +function echo_summary { if [[ -t 3 && "$VERBOSE" != "True" ]]; then kill >/dev/null 2>&1 $LAST_SPINNER_PID if [ ! 
-z "$LAST_SPINNER_PID" ]; then @@ -495,7 +495,7 @@ function echo_summary() { # Echo text only to stdout, no log files # echo_nolog "something not for the logs" -function echo_nolog() { +function echo_nolog { echo $@ >&3 } diff --git a/tests/functions.sh b/tests/functions.sh index 06a4134abf..874d02230d 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -42,7 +42,7 @@ fi echo "Testing enable_service()" -function test_enable_service() { +function test_enable_service { local start="$1" local add="$2" local finish="$3" @@ -68,7 +68,7 @@ test_enable_service 'a,b,c' c 'a,b,c' test_enable_service 'a,b,-c' c 'a,b' test_enable_service 'a,b,c' -c 'a,b' -function test_disable_service() { +function test_disable_service { local start="$1" local del="$2" local finish="$3" @@ -109,7 +109,7 @@ fi echo "Testing disable_negated_services()" -function test_disable_negated_services() { +function test_disable_negated_services { local start="$1" local finish="$2" diff --git a/tests/test_config.sh b/tests/test_config.sh index 39603c9dbe..5700f8df29 100755 --- a/tests/test_config.sh +++ b/tests/test_config.sh @@ -12,7 +12,7 @@ source $TOP/lib/config # check_result() tests and reports the result values # check_result "actual" "expected" -function check_result() { +function check_result { local actual=$1 local expected=$2 if [[ "$actual" == "$expected" ]]; then @@ -26,7 +26,7 @@ TEST_1C_ADD="[eee] type=new multi = foo2" -function create_test1c() { +function create_test1c { cat >test1c.conf <test2a.conf <\w+)", line) @@ -169,6 +184,7 @@ def check_files(files, verbose): check_indents(logical_line) check_for_do(logical_line) check_if_then(logical_line) + check_function_decl(logical_line) prev_line = logical_line prev_lineno = fileinput.filelineno() diff --git a/tools/build_pxe_env.sh b/tools/build_pxe_env.sh index e6f98b4b75..50d91d063c 100755 --- a/tools/build_pxe_env.sh +++ b/tools/build_pxe_env.sh @@ -17,7 +17,7 @@ PXEDIR=${PXEDIR:-/opt/ramstack/pxe} PROGDIR=`dirname $0` # Clean up 
any resources that may be in use -cleanup() { +function cleanup { set +o errexit # Mop up temporary files diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index 737255578a..50ba8ef2ca 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -14,7 +14,7 @@ if [ ! "$#" -eq "1" ]; then fi # Clean up any resources that may be in use -cleanup() { +function cleanup { set +o errexit # Mop up temporary files @@ -87,7 +87,7 @@ fi # Finds and returns full device path for the next available NBD device. # Exits script if error connecting or none free. # map_nbd image -function map_nbd() { +function map_nbd { for i in `seq 0 15`; do if [ ! -e /sys/block/nbd$i/pid ]; then NBD=/dev/nbd$i diff --git a/tools/build_uec_ramdisk.sh b/tools/build_uec_ramdisk.sh index 3ab5dafdcb..5f3acc5684 100755 --- a/tools/build_uec_ramdisk.sh +++ b/tools/build_uec_ramdisk.sh @@ -20,7 +20,7 @@ if ! egrep -q "oneiric" /etc/lsb-release; then fi # Clean up resources that may be in use -cleanup() { +function cleanup { set +o errexit if [ -n "$MNT_DIR" ]; then diff --git a/tools/build_usb_boot.sh b/tools/build_usb_boot.sh index 8566229833..c97e0a143d 100755 --- a/tools/build_usb_boot.sh +++ b/tools/build_usb_boot.sh @@ -13,7 +13,7 @@ DEST_DIR=${1:-/tmp/syslinux-boot} PXEDIR=${PXEDIR:-/opt/ramstack/pxe} # Clean up any resources that may be in use -cleanup() { +function cleanup { set +o errexit # Mop up temporary files diff --git a/tools/copy_dev_environment_to_uec.sh b/tools/copy_dev_environment_to_uec.sh index 3fd4423f86..94a4926668 100755 --- a/tools/copy_dev_environment_to_uec.sh +++ b/tools/copy_dev_environment_to_uec.sh @@ -22,7 +22,7 @@ cd $TOP_DIR source ./stackrc # Echo usage -usage() { +function usage { echo "Add stack user and keys" echo "" echo "Usage: $0 [full path to raw uec base image]" diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index cd5a1c9643..47da3341b8 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -11,8 +11,7 @@ set -o xtrace 
ACCOUNT_DIR=./accrc -display_help() -{ +function display_help { cat < @@ -151,7 +150,7 @@ if ! nova x509-get-root-cert "$EUCALYPTUS_CERT"; then fi -function add_entry(){ +function add_entry { local user_id=$1 local user_name=$2 local tenant_id=$3 @@ -213,7 +212,7 @@ EOF } #admin users expected -function create_or_get_tenant(){ +function create_or_get_tenant { local tenant_name=$1 local tenant_id=`keystone tenant-list | awk '/\|[[:space:]]*'"$tenant_name"'[[:space:]]*\|.*\|/ {print $2}'` if [ -n "$tenant_id" ]; then @@ -223,7 +222,7 @@ function create_or_get_tenant(){ fi } -function create_or_get_role(){ +function create_or_get_role { local role_name=$1 local role_id=`keystone role-list| awk '/\|[[:space:]]*'"$role_name"'[[:space:]]*\|/ {print $2}'` if [ -n "$role_id" ]; then @@ -234,7 +233,7 @@ function create_or_get_role(){ } # Provides empty string when the user does not exists -function get_user_id(){ +function get_user_id { local user_name=$1 keystone user-list | awk '/^\|[^|]*\|[[:space:]]*'"$user_name"'[[:space:]]*\|.*\|/ {print $2}' } diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 47b0cd10cd..7833278a12 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -40,7 +40,7 @@ FILES=$TOP_DIR/files # --------------- # get_package_path python-package # in import notation -function get_package_path() { +function get_package_path { local package=$1 echo $(python -c "import os; import $package; print(os.path.split(os.path.realpath($package.__file__))[0])") } diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh index da13f4b875..225742c041 100755 --- a/tools/get_uec_image.sh +++ b/tools/get_uec_image.sh @@ -18,7 +18,7 @@ TOP_DIR=$(cd $TOOLS_DIR/..; pwd) set -o errexit set -o xtrace -usage() { +function usage { echo "Usage: $0 - Download and prepare Ubuntu UEC images" echo "" echo "$0 [-r rootsize] release imagefile [kernel]" @@ -31,7 +31,7 @@ usage() { } # Clean up any resources that may be in use -cleanup() { +function cleanup { set +o 
errexit # Mop up temporary files diff --git a/tools/info.sh b/tools/info.sh index 1e521b9c4b..a8f9544073 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -61,7 +61,7 @@ fi # ----- # git_report -function git_report() { +function git_report { local dir=$1 local proj ref branch head if [[ -d $dir/.git ]]; then diff --git a/tools/install_openvpn.sh b/tools/install_openvpn.sh index 2f52aa14d0..9a4f0369d5 100755 --- a/tools/install_openvpn.sh +++ b/tools/install_openvpn.sh @@ -22,7 +22,7 @@ if [ -e vpnrc ]; then fi # Do some IP manipulation -function cidr2netmask() { +function cidr2netmask { set -- $(( 5 - ($1 / 8) )) 255 255 255 255 $(( (255 << (8 - ($1 % 8))) & 255 )) 0 0 0 if [[ $1 -gt 1 ]]; then shift $1 @@ -50,7 +50,7 @@ VPN_CLIENT_DHCP="${VPN_CLIENT_DHCP:-net.1 net.254}" VPN_DIR=/etc/openvpn CA_DIR=$VPN_DIR/easy-rsa -usage() { +function usage { echo "$0 - OpenVPN install and certificate generation" echo "" echo "$0 --client name" @@ -102,7 +102,7 @@ if [ ! -r $CA_DIR/keys/dh1024.pem ]; then openvpn --genkey --secret $CA_DIR/keys/ta.key ## Build a TLS key fi -do_server() { +function do_server { NAME=$1 # Generate server certificate $CA_DIR/pkitool --server $NAME @@ -162,7 +162,7 @@ EOF /etc/init.d/openvpn restart } -do_client() { +function do_client { NAME=$1 # Generate a client certificate $CA_DIR/pkitool $NAME diff --git a/tools/install_pip.sh b/tools/install_pip.sh index d714d33530..9fa161e043 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -50,7 +50,7 @@ PIP_TAR_URL=https://pypi.python.org/packages/source/p/pip/pip-$INSTALL_PIP_VERSI GetDistro echo "Distro: $DISTRO" -function get_versions() { +function get_versions { PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || true) if [[ -n $PIP ]]; then PIP_VERSION=$($PIP --version | awk '{ print $2}') @@ -61,7 +61,7 @@ function get_versions() { } -function install_get_pip() { +function install_get_pip { if [[ ! 
-r $FILES/get-pip.py ]]; then (cd $FILES; \ curl -O $PIP_GET_PIP_URL; \ @@ -70,7 +70,7 @@ function install_get_pip() { sudo -E python $FILES/get-pip.py } -function install_pip_tarball() { +function install_pip_tarball { (cd $FILES; \ curl -O $PIP_TAR_URL; \ tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz 1>/dev/null; \ diff --git a/tools/jenkins/build_configuration.sh b/tools/jenkins/build_configuration.sh index e295ef2017..64ee159651 100755 --- a/tools/jenkins/build_configuration.sh +++ b/tools/jenkins/build_configuration.sh @@ -5,7 +5,7 @@ CONFIGURATION=$2 ADAPTER=$3 RC=$4 -function usage() { +function usage { echo "Usage: $0 - Build a configuration" echo "" echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh index d9a160ad76..6927fd7c29 100755 --- a/tools/jenkins/configurations/kvm.sh +++ b/tools/jenkins/configurations/kvm.sh @@ -9,7 +9,7 @@ CONFIGURATION=$2 ADAPTER=$3 RC=$4 -function usage() { +function usage { echo "Usage: $0 - Build a test configuration" echo "" echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" diff --git a/tools/jenkins/configurations/xs.sh b/tools/jenkins/configurations/xs.sh index 864f949114..7b671e9df4 100755 --- a/tools/jenkins/configurations/xs.sh +++ b/tools/jenkins/configurations/xs.sh @@ -8,7 +8,7 @@ CONFIGURATION=$2 ADAPTER=$3 RC=$4 -function usage() { +function usage { echo "Usage: $0 - Build a test configuration" echo "" echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" diff --git a/tools/jenkins/run_test.sh b/tools/jenkins/run_test.sh index 464956375e..d2b82843b4 100755 --- a/tools/jenkins/run_test.sh +++ b/tools/jenkins/run_test.sh @@ -4,7 +4,7 @@ EXECUTOR_NUMBER=$1 ADAPTER=$2 RC=$3 -function usage() { +function usage { echo "Usage: $0 - Run a test" echo "" echo "$0 [EXECUTOR_NUMBER] [ADAPTER] [RC (optional)]" diff --git a/tools/warm_apts_for_uec.sh b/tools/warm_apts_for_uec.sh index 
3c15f52ee3..c57fc2e59c 100755 --- a/tools/warm_apts_for_uec.sh +++ b/tools/warm_apts_for_uec.sh @@ -16,7 +16,7 @@ TOP_DIR=`cd $TOOLS_DIR/..; pwd` cd $TOP_DIR # Echo usage -usage() { +function usage { echo "Cache OpenStack dependencies on a uec image to speed up performance." echo "" echo "Usage: $0 [full path to raw uec base image]" diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index fbbfd6fbe5..cc3cbe18d1 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -42,7 +42,7 @@ source xenrc # GUEST_NAME="$1" -function _print_interface_config() { +function _print_interface_config { local device_nr local ip_address local netmask @@ -68,7 +68,7 @@ function _print_interface_config() { echo " post-up ethtool -K $device tx off" } -function print_interfaces_config() { +function print_interfaces_config { echo "auto lo" echo "iface lo inet loopback" diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 7b59bae6b8..a4b3e06e88 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -166,7 +166,7 @@ TNAME="jeos_template_for_devstack" SNAME_TEMPLATE="jeos_snapshot_for_devstack" SNAME_FIRST_BOOT="before_first_boot" -function wait_for_VM_to_halt() { +function wait_for_VM_to_halt { set +x echo "Waiting for the VM to halt. 
Progress in-VM can be checked with vncviewer:" mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.') @@ -318,7 +318,7 @@ xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_FIRST_BOOT" # xe vm-start vm="$GUEST_NAME" -function ssh_no_check() { +function ssh_no_check { ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$@" } @@ -349,7 +349,7 @@ DOMID=$(get_domid "$GUEST_NAME") xenstore-write /local/domain/$DOMID/authorized_keys/$DOMZERO_USER "$(cat /root/dom0key.pub)" xenstore-chmod -u /local/domain/$DOMID/authorized_keys/$DOMZERO_USER r$DOMID -function run_on_appliance() { +function run_on_appliance { ssh \ -i /root/dom0key \ -o UserKnownHostsFile=/dev/null \ diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index 094612624b..440774ec5b 100755 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -21,7 +21,7 @@ STACK_USER="$3" DOMZERO_USER="$4" -function setup_domzero_user() { +function setup_domzero_user { local username username="$1" From e2907b4838230940a8ff1735feffd80acf13bdab Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 26 Feb 2014 17:35:37 -0600 Subject: [PATCH 0199/4119] Fix Neutron enabled check * Remove the check for neutron enabled on a block of variable settings, there is no conflict and serves no purpose. * Also floating_ips.sh and volume.sh needed to properly source lib/neutron for do ping_check() to work properly. The current error in check-devstack-dsvm-neutron is not related to this fix. 
Change-Id: I1c458aaa787ffb98c945aefc3afa80c6861a405f --- exercises/floating_ips.sh | 6 +- exercises/volumes.sh | 7 +- lib/neutron | 154 +++++++++++++++++++------------------- 3 files changed, 84 insertions(+), 83 deletions(-) diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index b981aa8294..8dc44effbc 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -27,12 +27,12 @@ TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions source $TOP_DIR/functions -# Import project functions -source $TOP_DIR/lib/neutron - # Import configuration source $TOP_DIR/openrc +# Import project functions +source $TOP_DIR/lib/neutron + # Import exercise configuration source $TOP_DIR/exerciserc diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 33e24589eb..83d25c779c 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -27,12 +27,13 @@ TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions source $TOP_DIR/functions -# Import project functions -source $TOP_DIR/lib/cinder - # Import configuration source $TOP_DIR/openrc +# Import project functions +source $TOP_DIR/lib/cinder +source $TOP_DIR/lib/neutron + # Import exercise configuration source $TOP_DIR/exerciserc diff --git a/lib/neutron b/lib/neutron index df276c71d5..be123adcd5 100644 --- a/lib/neutron +++ b/lib/neutron @@ -59,10 +59,6 @@ # LinuxBridge plugin, please see the top level README file under the # Neutron section. 
-# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - # Neutron Network Configuration # ----------------------------- @@ -127,82 +123,81 @@ Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-False} # See _configure_neutron_common() for details about setting it up declare -a Q_PLUGIN_EXTRA_CONF_FILES -if is_service_enabled neutron; then - Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf - if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - Q_RR_COMMAND="sudo" - else - NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) - Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" - fi - - # Provider Network Configurations - # -------------------------------- - - # The following variables control the Neutron openvswitch and - # linuxbridge plugins' allocation of tenant networks and - # availability of provider networks. If these are not configured - # in ``localrc``, tenant networks will be local to the host (with no - # remote connectivity), and no physical resources will be - # available for the allocation of provider networks. - - # To use GRE tunnels for tenant networks, set to True in - # ``localrc``. GRE tunnels are only supported by the openvswitch - # plugin, and currently only on Ubuntu. - ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False} - - # If using GRE tunnels for tenant networks, specify the range of - # tunnel IDs from which tenant networks are allocated. Can be - # overriden in ``localrc`` in necesssary. - TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000} - - # To use VLANs for tenant networks, set to True in localrc. VLANs - # are supported by the openvswitch and linuxbridge plugins, each - # requiring additional configuration described below. - ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} - - # If using VLANs for tenant networks, set in ``localrc`` to specify - # the range of VLAN VIDs from which tenant networks are - # allocated. 
An external network switch must be configured to - # trunk these VLANs between hosts for multi-host connectivity. - # - # Example: ``TENANT_VLAN_RANGE=1000:1999`` - TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} - - # If using VLANs for tenant networks, or if using flat or VLAN - # provider networks, set in ``localrc`` to the name of the physical - # network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the - # openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge - # agent, as described below. - # - # Example: ``PHYSICAL_NETWORK=default`` - PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} - - # With the openvswitch plugin, if using VLANs for tenant networks, - # or if using flat or VLAN provider networks, set in ``localrc`` to - # the name of the OVS bridge to use for the physical network. The - # bridge will be created if it does not already exist, but a - # physical interface must be manually added to the bridge as a - # port for external connectivity. - # - # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` - OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} - - # With the linuxbridge plugin, if using VLANs for tenant networks, - # or if using flat or VLAN provider networks, set in ``localrc`` to - # the name of the network interface to use for the physical - # network. - # - # Example: ``LB_PHYSICAL_INTERFACE=eth1`` - LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} - # With the openvswitch plugin, set to True in ``localrc`` to enable - # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. 
- # - # Example: ``OVS_ENABLE_TUNNELING=True`` - OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} +Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf +if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + Q_RR_COMMAND="sudo" +else + NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) + Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" fi +# Provider Network Configurations +# -------------------------------- + +# The following variables control the Neutron openvswitch and +# linuxbridge plugins' allocation of tenant networks and +# availability of provider networks. If these are not configured +# in ``localrc``, tenant networks will be local to the host (with no +# remote connectivity), and no physical resources will be +# available for the allocation of provider networks. + +# To use GRE tunnels for tenant networks, set to True in +# ``localrc``. GRE tunnels are only supported by the openvswitch +# plugin, and currently only on Ubuntu. +ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False} + +# If using GRE tunnels for tenant networks, specify the range of +# tunnel IDs from which tenant networks are allocated. Can be +# overriden in ``localrc`` in necesssary. +TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000} + +# To use VLANs for tenant networks, set to True in localrc. VLANs +# are supported by the openvswitch and linuxbridge plugins, each +# requiring additional configuration described below. +ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} + +# If using VLANs for tenant networks, set in ``localrc`` to specify +# the range of VLAN VIDs from which tenant networks are +# allocated. An external network switch must be configured to +# trunk these VLANs between hosts for multi-host connectivity. 
+# +# Example: ``TENANT_VLAN_RANGE=1000:1999`` +TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} + +# If using VLANs for tenant networks, or if using flat or VLAN +# provider networks, set in ``localrc`` to the name of the physical +# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the +# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge +# agent, as described below. +# +# Example: ``PHYSICAL_NETWORK=default`` +PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} + +# With the openvswitch plugin, if using VLANs for tenant networks, +# or if using flat or VLAN provider networks, set in ``localrc`` to +# the name of the OVS bridge to use for the physical network. The +# bridge will be created if it does not already exist, but a +# physical interface must be manually added to the bridge as a +# port for external connectivity. +# +# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` +OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} + +# With the linuxbridge plugin, if using VLANs for tenant networks, +# or if using flat or VLAN provider networks, set in ``localrc`` to +# the name of the network interface to use for the physical +# network. +# +# Example: ``LB_PHYSICAL_INTERFACE=eth1`` +LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} + +# With the openvswitch plugin, set to True in ``localrc`` to enable +# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. +# +# Example: ``OVS_ENABLE_TUNNELING=True`` +OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} + # Neutron plugin specific functions # --------------------------------- @@ -241,6 +236,11 @@ fi TEMPEST_SERVICES+=,neutron +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + # Functions # --------- From 1237922b655d8ab1690b88c718d7002415ce1201 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 27 Feb 2014 17:16:46 -0500 Subject: [PATCH 0200/4119] make service_check fatal if we fail service check, we should do so in a fatal way, because something is not right. 
This will be very useful in grenade. Change-Id: I18811b0d8e6d06f364685c366cdc8f5dda3b8f7e --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 2248fbb610..ab7bc2139b 100644 --- a/functions-common +++ b/functions-common @@ -1135,7 +1135,7 @@ function service_check() { done if [ -n "$failures" ]; then - echo "More details about the above errors can be found with screen, with ./rejoin-stack.sh" + die $LINENO "More details about the above errors can be found with screen, with ./rejoin-stack.sh" fi } From c03f975150bf97b5aef42daa77fc419a9e241123 Mon Sep 17 00:00:00 2001 From: sukhdev Date: Thu, 27 Feb 2014 14:17:44 -0800 Subject: [PATCH 0201/4119] devstack (stack.sh) fails when extra config files are specified Latest merge of https://review.openstack.org/#/c/71996/ exposes an issue in stack.sh which did not surface before. Please see the details of the issue in the bug description. Closes bug: 1285884 Change-Id: Ie231c9835497c2a418a61d339dfd5df1aab9e3d7 --- lib/neutron | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index df276c71d5..07b21af336 100644 --- a/lib/neutron +++ b/lib/neutron @@ -586,11 +586,9 @@ function _configure_neutron_common() { # If additional config files exist, copy them over to neutron configuration # directory if [[ $Q_PLUGIN_EXTRA_CONF_PATH != '' ]]; then - mkdir -p /$Q_PLUGIN_EXTRA_CONF_PATH local f for (( f=0; $f < ${#Q_PLUGIN_EXTRA_CONF_FILES[@]}; f+=1 )); do Q_PLUGIN_EXTRA_CONF_FILES[$f]=$Q_PLUGIN_EXTRA_CONF_PATH/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} - cp $NEUTRON_DIR/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} /${Q_PLUGIN_EXTRA_CONF_FILES[$f]} done fi From 531aeb7900fd7f24794efb8f9da5fce65dc80f4b Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 28 Feb 2014 11:24:29 +1100 Subject: [PATCH 0202/4119] Preinstall yum-utils; move sudo check before install Some cloud images don't have yum-utils installed, so the call to yum-config-manager fails. 
Pre-install it (I still think it's easier than fiddling config files). Also, these repo setup steps are using sudo, but the root/sudo checks happen after this. Move them up before we start trying to do repo/package installs. Change-Id: I875e1f0663c9badc00278b2cc1a3b04ca3dde9fc --- stack.sh | 91 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 46 insertions(+), 45 deletions(-) diff --git a/stack.sh b/stack.sh index ac89e52515..669209c865 100755 --- a/stack.sh +++ b/stack.sh @@ -161,9 +161,42 @@ fi # Set up logging level VERBOSE=$(trueorfalse True $VERBOSE) +# root Access +# ----------- + +# OpenStack is designed to be run as a non-root user; Horizon will fail to run +# as **root** since Apache will not serve content from **root** user). +# ``stack.sh`` must not be run as **root**. It aborts and suggests one course of +# action to create a suitable user account. + +if [[ $EUID -eq 0 ]]; then + echo "You are running this script as root." + echo "Cut it out." + echo "Really." 
+ echo "If you need an account to run DevStack, do this (as root, heh) to create $STACK_USER:" + echo "$TOP_DIR/tools/create-stack-user.sh" + exit 1 +fi + +# We're not **root**, make sure ``sudo`` is available +is_package_installed sudo || install_package sudo + +# UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one +sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || + echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers + +# Set up devstack sudoers +TEMPFILE=`mktemp` +echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE +# Some binaries might be under /sbin or /usr/sbin, so make sure sudo will +# see them by forcing PATH +echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE +chmod 0440 $TEMPFILE +sudo chown root:root $TEMPFILE +sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh # Additional repos -# ================ +# ---------------- # Some distros need to add repos beyond the defaults provided by the vendor # to pick up required packages. @@ -196,45 +229,13 @@ if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then fi # ... and also optional to be enabled + is_package_installed yum-utils || install_package yum-utils sudo yum-config-manager --enable rhel-6-server-optional-rpms fi - -# root Access -# ----------- - -# OpenStack is designed to be run as a non-root user; Horizon will fail to run -# as **root** since Apache will not serve content from **root** user). -# ``stack.sh`` must not be run as **root**. It aborts and suggests one course of -# action to create a suitable user account. - -if [[ $EUID -eq 0 ]]; then - echo "You are running this script as root." - echo "Cut it out." - echo "Really." 
- echo "If you need an account to run DevStack, do this (as root, heh) to create $STACK_USER:" - echo "$TOP_DIR/tools/create-stack-user.sh" - exit 1 -fi - -# We're not **root**, make sure ``sudo`` is available -is_package_installed sudo || install_package sudo - -# UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one -sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || - echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers - -# Set up devstack sudoers -TEMPFILE=`mktemp` -echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE -# Some binaries might be under /sbin or /usr/sbin, so make sure sudo will -# see them by forcing PATH -echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE -chmod 0440 $TEMPFILE -sudo chown root:root $TEMPFILE -sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh - +# Filesystem setup +# ---------------- # Create the destination directory and ensure it is writable by the user # and read/executable by everybody for daemons (e.g. apache run for horizon) @@ -252,6 +253,15 @@ if [ -z "`grep ^127.0.0.1 /etc/hosts | grep $LOCAL_HOSTNAME`" ]; then sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts fi +# Destination path for service data +DATA_DIR=${DATA_DIR:-${DEST}/data} +sudo mkdir -p $DATA_DIR +safe_chown -R $STACK_USER $DATA_DIR + + +# Common Configuration +# -------------------- + # Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without # Internet access. ``stack.sh`` must have been previously run with Internet # access to install prerequisites and fetch repositories. 
@@ -265,15 +275,6 @@ ERROR_ON_CLONE=`trueorfalse False $ERROR_ON_CLONE` # Whether to enable the debug log level in OpenStack services ENABLE_DEBUG_LOG_LEVEL=`trueorfalse True $ENABLE_DEBUG_LOG_LEVEL` -# Destination path for service data -DATA_DIR=${DATA_DIR:-${DEST}/data} -sudo mkdir -p $DATA_DIR -safe_chown -R $STACK_USER $DATA_DIR - - -# Common Configuration -# ==================== - # Set fixed and floating range here so we can make sure not to use addresses # from either range when attempting to guess the IP to use for the host. # Note that setting FIXED_RANGE may be necessary when running DevStack From cb415697f37d3df2965f71b19c909a4c50f32eed Mon Sep 17 00:00:00 2001 From: Shashank Hegde Date: Thu, 27 Feb 2014 16:46:43 -0800 Subject: [PATCH 0203/4119] clean.sh removes all the files clean.sh was incorrectly looping over the list of files to remove. Because of this the files were not being removed. Change-Id: Ie0559e1d396a4d35df6a12dfbceefa7eb261bac5 Closes-Bug:1285924 --- clean.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clean.sh b/clean.sh index e121e4f703..3707d8411e 100755 --- a/clean.sh +++ b/clean.sh @@ -123,6 +123,6 @@ fi FILES_TO_CLEAN=".localrc.auto docs-files docs/ shocco/ stack-screenrc test*.conf* test.ini*" FILES_TO_CLEAN+=".stackenv .prereqs" -for file in FILES_TO_CLEAN; do +for file in $FILES_TO_CLEAN; do rm -f $TOP_DIR/$file done From d20f632a70565003ab8c72b2598201be79f4d782 Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Fri, 28 Feb 2014 09:22:37 +0900 Subject: [PATCH 0204/4119] Move some comments of variables to right place setup_develop*() in functions has been moved to functions-common. But some comments about the variables are still left. This commit moves it to the right place. 
Change-Id: Ic360454f1ee72f51c9979d0468dee0913e9b32e4 --- functions | 4 ---- functions-common | 3 +++ 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/functions b/functions index 3101111c63..407a9e708c 100644 --- a/functions +++ b/functions @@ -6,10 +6,6 @@ # - ``ENABLED_SERVICES`` # - ``FILES`` # - ``GLANCE_HOSTPORT`` -# - ``REQUIREMENTS_DIR`` -# - ``STACK_USER`` -# - ``TRACK_DEPENDS`` -# - ``UNDO_REQUIREMENTS`` # # Include the common functions diff --git a/functions-common b/functions-common index c93dd855b3..a485cae9d9 100644 --- a/functions-common +++ b/functions-common @@ -26,7 +26,10 @@ # - ``PIP_DOWNLOAD_CACHE`` # - ``PIP_USE_MIRRORS`` # - ``RECLONE`` +# - ``REQUIREMENTS_DIR`` +# - ``STACK_USER`` # - ``TRACK_DEPENDS`` +# - ``UNDO_REQUIREMENTS`` # - ``http_proxy``, ``https_proxy``, ``no_proxy`` # Save trace setting From 9bbecb7fc45538bc83d7db5e33a55505a691b44d Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Fri, 28 Feb 2014 11:19:28 -0500 Subject: [PATCH 0205/4119] Source lib/neutron in boot_from_volume.sh Without lib/neutron, boot_from_volume.sh generates the following error: + _ping_check_neutron private 10.11.12.5 30 /devstack/functions: line 356: _ping_check_neutron: command not found Change-Id: Ib72c3f24d614570d69bf5dda35cbaf5847b1d1b9 --- exercises/boot_from_volume.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 79120460b8..f679669eea 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -32,6 +32,7 @@ source $TOP_DIR/functions # Import project functions source $TOP_DIR/lib/cinder +source $TOP_DIR/lib/neutron # Import configuration source $TOP_DIR/openrc From 0e57b967e558fa843277d0119e50f0cb807929a2 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Fri, 28 Feb 2014 09:09:52 +0100 Subject: [PATCH 0206/4119] Devstack install can fail on missing xinetd.d/rsync config Assuming if the system does not have the xinetd.d/rsync, the dedicated 
service is the prefered way. Change-Id: Ic42651c5c3fb5bf0099786ca81a7bd06ace896a8 --- lib/swift | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index 59c1e54d8a..5d4d4ef506 100644 --- a/lib/swift +++ b/lib/swift @@ -301,7 +301,7 @@ function configure_swift { # rsyncd.conf just prepared for 4 nodes if is_ubuntu; then sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync - else + elif [ -e /etc/xinetd.d/rsync ]; then sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync fi @@ -635,8 +635,10 @@ function start_swift { # Start rsync if is_ubuntu; then sudo /etc/init.d/rsync restart || : + elif [ -e /etc/xinetd.d/rsync ]; then + start_service xinetd else - sudo systemctl start xinetd.service + start_service rsyncd fi if is_apache_enabled_service swift; then From 2e978dd6286a33af72796dc97cd81ed5fa2255de Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Fri, 28 Feb 2014 14:06:59 -0500 Subject: [PATCH 0207/4119] Add use_syslog to Marconi config This patch adds use_syslog option to the marconi config file. This is needed to allow marconi to run, when USE_SCREEN is set to False in devstack. Change-Id: I547697ec2745975e235a4e58cde81132ac37b70d --- lib/marconi | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/marconi b/lib/marconi index 8cfc55c1dd..29ae386d9f 100644 --- a/lib/marconi +++ b/lib/marconi @@ -95,6 +95,7 @@ function configure_marconi { sudo chown $USER $MARCONI_API_LOG_DIR iniset $MARCONI_CONF DEFAULT verbose True + iniset $MARCONI_CONF DEFAULT use_syslog $SYSLOG iniset $MARCONI_CONF 'drivers:transport:wsgi' bind $MARCONI_SERVICE_HOST iniset $MARCONI_CONF keystone_authtoken auth_protocol http From e994f5708d124ae71211876e9456499ac25646a3 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Fri, 28 Feb 2014 15:13:37 -0500 Subject: [PATCH 0208/4119] Remove bm_poseur, unmaintained and obsolete The bm_poseur git repository link has been broken for over 11 months. 
The virtualized/fake baremetal environment is not working and has not worked in a long time. Now, on the tail of enabling 'enable -o errexit', this functionality now has a hard break. Change-Id: I3cbd8db58c422bc5273d2433278aaa5e449ecfd9 Closes-Bug: 1285954 --- lib/baremetal | 44 ++++---------------------------------------- stack.sh | 3 --- stackrc | 6 ------ unstack.sh | 5 ----- 4 files changed, 4 insertions(+), 54 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index 473de0dd39..1d02e1e417 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -77,14 +77,6 @@ BM_POWER_MANAGER=${BM_POWER_MANAGER:-nova.virt.baremetal.fake.FakePowerManager} # These should be customized to your environment and hardware # ----------------------------------------------------------- -# whether to create a fake environment, eg. for devstack-gate -BM_USE_FAKE_ENV=`trueorfalse False $BM_USE_FAKE_ENV` - -# Extra options to pass to bm_poseur -# change the bridge name or IP: --bridge br99 --bridge-ip 192.0.2.1 -# change the virtualization type: --engine qemu -BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-} - # To provide PXE, configure nova-network's dnsmasq rather than run the one # dedicated to baremetal. 
When enable this, make sure these conditions are # fulfilled: @@ -97,15 +89,10 @@ BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-} BM_DNSMASQ_FROM_NOVA_NETWORK=`trueorfalse False $BM_DNSMASQ_FROM_NOVA_NETWORK` # BM_DNSMASQ_IFACE should match FLAT_NETWORK_BRIDGE -if [ "$BM_USE_FAKE_ENV" ]; then - BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-br99} - BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-192.0.2.32,192.0.2.48} -else - BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0} - # if testing on a physical network, - # BM_DNSMASQ_RANGE must be changed to suit your network - BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-} -fi +BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0} +# if testing on a physical network, +# BM_DNSMASQ_RANGE must be changed to suit your network +BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-} # BM_DNSMASQ_DNS provide dns server to bootstrap clients BM_DNSMASQ_DNS=${BM_DNSMASQ_DNS:-} @@ -143,7 +130,6 @@ BM_FLAVOR_ARCH=${BM_FLAVOR_ARCH:-$BM_CPU_ARCH} # Below this, we set some path and filenames. # Defaults are probably sufficient. BM_IMAGE_BUILD_DIR=${BM_IMAGE_BUILD_DIR:-$DEST/diskimage-builder} -BM_POSEUR_DIR=${BM_POSEUR_DIR:-$DEST/bm_poseur} # Use DIB to create deploy ramdisk and kernel. BM_BUILD_DEPLOY_RAMDISK=`trueorfalse True $BM_BUILD_DEPLOY_RAMDISK` @@ -177,7 +163,6 @@ function is_baremetal { # so that we can build the deployment kernel & ramdisk function prepare_baremetal_toolchain { git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH - git_clone $BM_POSEUR_REPO $BM_POSEUR_DIR $BM_POSEUR_BRANCH local shellinabox_basename=$(basename $BM_SHELL_IN_A_BOX) if [[ ! 
-e $DEST/$shellinabox_basename ]]; then @@ -196,27 +181,6 @@ function prepare_baremetal_toolchain { fi } -# set up virtualized environment for devstack-gate testing -function create_fake_baremetal_env { - local bm_poseur="$BM_POSEUR_DIR/bm_poseur" - # TODO(deva): add support for >1 VM - sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-bridge - sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-vm - BM_FIRST_MAC=$(sudo $bm_poseur get-macs) - - # NOTE: there is currently a limitation in baremetal driver - # that requires second MAC even if it is not used. - # Passing a fake value allows this to work. - # TODO(deva): remove this after driver issue is fixed. - BM_SECOND_MAC='12:34:56:78:90:12' -} - -function cleanup_fake_baremetal_env { - local bm_poseur="$BM_POSEUR_DIR/bm_poseur" - sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-vm - sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-bridge -} - # prepare various directories needed by baremetal hypervisor function configure_baremetal_nova_dirs { # ensure /tftpboot is prepared diff --git a/stack.sh b/stack.sh index 0ec0e0dc93..5152b2a430 100755 --- a/stack.sh +++ b/stack.sh @@ -1052,9 +1052,6 @@ if is_service_enabled nova && is_baremetal; then echo_summary "Preparing for nova baremetal" prepare_baremetal_toolchain configure_baremetal_nova_dirs - if [[ "$BM_USE_FAKE_ENV" = "True" ]]; then - create_fake_baremetal_env - fi fi diff --git a/stackrc b/stackrc index f235cccb15..6bb6f37195 100644 --- a/stackrc +++ b/stackrc @@ -229,12 +229,6 @@ TEMPEST_BRANCH=${TEMPEST_BRANCH:-master} BM_IMAGE_BUILD_REPO=${BM_IMAGE_BUILD_REPO:-${GIT_BASE}/openstack/diskimage-builder.git} BM_IMAGE_BUILD_BRANCH=${BM_IMAGE_BUILD_BRANCH:-master} -# bm_poseur -# Used to simulate a hardware environment for baremetal -# Only used if BM_USE_FAKE_ENV is set -BM_POSEUR_REPO=${BM_POSEUR_REPO:-${GIT_BASE}/tripleo/bm_poseur.git} -BM_POSEUR_BRANCH=${BM_POSEUR_BRANCH:-master} - # a websockets/html5 or flash powered VNC console for vm instances 
NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git} NOVNC_BRANCH=${NOVNC_BRANCH:-master} diff --git a/unstack.sh b/unstack.sh index 6351fe0549..a5e7b879f9 100755 --- a/unstack.sh +++ b/unstack.sh @@ -127,11 +127,6 @@ if is_service_enabled tls-proxy; then killall stud fi -# baremetal might have created a fake environment -if is_service_enabled baremetal && [[ "$BM_USE_FAKE_ENV" = "True" ]]; then - cleanup_fake_baremetal_env -fi - SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* # Get the iSCSI volumes From 8f084c6b855a747467274facb1218837e0f53c88 Mon Sep 17 00:00:00 2001 From: Nicolas Simonds Date: Fri, 28 Feb 2014 17:01:41 -0800 Subject: [PATCH 0209/4119] use "rabbit_hosts" config option instead of "rabbit_host" This allows for easy client configuration against clustered RabbitMQ setups. Does not break existing configs. Change-Id: I2b180f8860a727e35d7b465253689e5e8c44eb98 Closes-Bug: 1286411 --- lib/rpc_backend | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index a0424b1dee..e922daa078 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -186,7 +186,7 @@ function iniset_rpc_backend { fi elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_kombu - iniset $file $section rabbit_host $RABBIT_HOST + iniset $file $section rabbit_hosts $RABBIT_HOST iniset $file $section rabbit_password $RABBIT_PASSWORD fi } From 12cb2299e8e4d933c7181ef1a9b97478214d2200 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 28 Feb 2014 19:53:50 -0500 Subject: [PATCH 0210/4119] nova changes for multinode working under -o errexit There was a stray inicomment on paste outside of a nova-api block. This fails under -o errexit because the paste.ini doesn't exist. Move this to inside the correct block. 
Change-Id: Iffbdae6716a1c2a8f650b68edd4faf436434eab1 --- lib/nova | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index 90b1ba4fde..583a5923ce 100644 --- a/lib/nova +++ b/lib/nova @@ -245,10 +245,9 @@ function configure_nova { inicomment $NOVA_API_PASTE_INI filter:authtoken cafile inicomment $NOVA_API_PASTE_INI filter:authtoken admin_user inicomment $NOVA_API_PASTE_INI filter:authtoken admin_password + inicomment $NOVA_API_PASTE_INI filter:authtoken signing_dir fi - inicomment $NOVA_API_PASTE_INI filter:authtoken signing_dir - if is_service_enabled n-cpu; then # Force IP forwarding on, just on case sudo sysctl -w net.ipv4.ip_forward=1 From 7083b8224dab423392e21b069a1a6ef54cd14a8f Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 28 Feb 2014 20:16:20 -0500 Subject: [PATCH 0211/4119] make ceilometer work if you don't enable ceilometer-api when doing ceilometer in a multihost devstack, you don't want ceilometer-api running on the computes. Under -o errexit this became fatal. Change-Id: Ie43c8724ba467b810f5a3b075dea45d66dde8648 --- lib/ceilometer | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index d20d628247..0be4184a37 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -206,9 +206,12 @@ function start_ceilometer { screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF" screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" - echo "Waiting for ceilometer-api to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then - die $LINENO "ceilometer-api did not start" + # only die on API if it was actually intended to be turned on + if service_enabled ceilometer-api; then + echo "Waiting for ceilometer-api to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then + die $LINENO "ceilometer-api did not start" + fi fi screen_it ceilometer-alarm-notifier "cd ; ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" From c921a95f63b00c549763c9968a103d44df590032 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 28 Feb 2014 21:09:33 -0500 Subject: [PATCH 0212/4119] only do a dbsync if on the database node ceilometer should only try to reset the database if it's actually on a node where there is a database. Change-Id: Ibcfec0556829bff0938e3769c19d34ae6c02b738 --- lib/ceilometer | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 0be4184a37..2e6e7c5a76 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -180,9 +180,11 @@ function init_ceilometer { sudo chown $STACK_USER $CEILOMETER_AUTH_CACHE_DIR rm -f $CEILOMETER_AUTH_CACHE_DIR/* - if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then - recreate_database ceilometer utf8 - $CEILOMETER_BIN_DIR/ceilometer-dbsync + if is_service_enabled mysql postgresql; then + if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then + recreate_database ceilometer utf8 + $CEILOMETER_BIN_DIR/ceilometer-dbsync + fi fi } From a8880cc22c540e88c43da4e49fa6c976361484e4 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Sat, 1 Mar 2014 00:24:51 -0500 Subject: [PATCH 0213/4119] Use glance image-show to check for uploaded Docker images The behavior of the code being replaced was failing with '-o errexit' should that, as in the common case, the image has not been uploaded into Glance. While we could workaround this using a '|| :', the existing code also happened to overwrite the DOCKER_IMAGE global which is used elsewhere. It seemed prudent to either change this variable name or remove it altogether. 
Finally, using 'glance image-show' is more deterministic than grepping the output of 'glance image-list'. Change-Id: I23188155966ae9db64259b4a9d25a0d98c63c912 Closes-Bug: 1286443 --- lib/nova_plugins/hypervisor-docker | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index f8dc6afa19..cdbc4d172d 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -104,8 +104,7 @@ function start_nova_hypervisor { fi # Make sure we copied the image in Glance - DOCKER_IMAGE=$(glance image-list | egrep " $DOCKER_IMAGE_NAME ") - if ! is_set DOCKER_IMAGE ; then + if ! (glance image-show "$DOCKER_IMAGE"); then docker push $DOCKER_REPOSITORY_NAME fi } From 5a110d4e684d5cf936621608003f6b30eb75c2b1 Mon Sep 17 00:00:00 2001 From: fumihiko kakuma Date: Wed, 29 Jan 2014 14:42:06 +0900 Subject: [PATCH 0214/4119] Add configurations for the OpenFlow Agent mechanism driver This patch supports configurations for an environment of the OpenFlow Agent mechanism driver Set the following variables in a localrc to be ran this mechanism driver. 
Q_ML2_PLUGIN_MECHANISM_DRIVERS=ofagent Q_AGENT=ofagent Implements: blueprint ryu-ml2-driver Change-Id: I774da9a26f241487dfa4ec124b12f528704d860b --- lib/neutron_plugins/ofagent_agent | 94 +++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 lib/neutron_plugins/ofagent_agent diff --git a/lib/neutron_plugins/ofagent_agent b/lib/neutron_plugins/ofagent_agent new file mode 100644 index 0000000000..724df41d4c --- /dev/null +++ b/lib/neutron_plugins/ofagent_agent @@ -0,0 +1,94 @@ +# OpenFlow Agent plugin +# ---------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/neutron_plugins/ovs_base +source $TOP_DIR/lib/neutron_thirdparty/ryu # for RYU_DIR, install_ryu, etc + +function neutron_plugin_create_nova_conf { + _neutron_ovs_base_configure_nova_vif_driver +} + +function neutron_plugin_install_agent_packages { + _neutron_ovs_base_install_agent_packages + + # This agent uses ryu to talk with switches + install_package $(get_packages "ryu") + install_ryu + configure_ryu +} + +function neutron_plugin_configure_debug_command { + _neutron_ovs_base_configure_debug_command +} + +function neutron_plugin_configure_dhcp_agent { + iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport +} + +function neutron_plugin_configure_l3_agent { + _neutron_ovs_base_configure_l3_agent + iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport +} + +function neutron_plugin_configure_plugin_agent { + # Set up integration bridge + _neutron_ovs_base_setup_bridge $OVS_BRIDGE + _neutron_ovs_base_configure_firewall_driver + + # Check a supported openflow version + OF_VERSION=`ovs-ofctl --version | grep "OpenFlow versions" | awk '{print $3}' | cut -d':' -f2` + if [ `vercmp_numbers "$OF_VERSION" "0x3"` -lt "0" ]; then + die $LINENO "This agent requires OpenFlow 1.3+ capable switch." 
+ fi + + # Enable tunnel networks if selected + if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then + # Verify tunnels are supported + # REVISIT - also check kernel module support for GRE and patch ports + OVS_VERSION=`ovs-vsctl --version | head -n 1 | grep -E -o "[0-9]+\.[0-9]+"` + if [ `vercmp_numbers "$OVS_VERSION" "1.4"` -lt "0" ]; then + die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts." + fi + iniset /$Q_PLUGIN_CONF_FILE ovs enable_tunneling True + iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP + fi + + # Setup physical network bridge mappings. Override + # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more + # complex physical network configurations. + if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then + OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE + + # Configure bridge manually with physical interface as port for multi-node + sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE + fi + if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings $OVS_BRIDGE_MAPPINGS + fi + AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ofagent-agent" + + # Define extra "AGENT" configuration options when q-agt is configured by defining + # defining the array ``Q_AGENT_EXTRA_AGENT_OPTS``. 
+ # For Example: ``Q_AGENT_EXTRA_AGENT_OPTS=(foo=true bar=2)`` + for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + iniset /$Q_PLUGIN_CONF_FILE agent ${I/=/ } + done +} + +function neutron_plugin_setup_interface_driver { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver + iniset $conf_file DEFAULT ovs_use_veth True +} + +function neutron_plugin_check_adv_test_requirements { + is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 +} + +# Restore xtrace +$MY_XTRACE From 46c688c1ae2bdb0fc923635392a602efa3fd38c2 Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Mon, 24 Feb 2014 18:42:37 +0900 Subject: [PATCH 0215/4119] Fix pep8 errors This commit fixes pep8 errors. Change-Id: Ia1f1d61081a86b8a58251918392171cbc60f5ab8 --- tools/jenkins/jenkins_home/print_summary.py | 17 ++++++++++++-- tools/uec/meta.py | 25 ++++++++++++++++----- 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/tools/jenkins/jenkins_home/print_summary.py b/tools/jenkins/jenkins_home/print_summary.py index ea943e1caf..ee3790fcda 100755 --- a/tools/jenkins/jenkins_home/print_summary.py +++ b/tools/jenkins/jenkins_home/print_summary.py @@ -1,7 +1,20 @@ #!/usr/bin/python -import urllib + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ import json import sys +import urllib def print_usage(): @@ -42,4 +55,4 @@ def fetch_blob(url): 'logUrl': log_url, 'healthReport': config['healthReport']}) -print json.dumps(results) +print(json.dumps(results)) diff --git a/tools/uec/meta.py b/tools/uec/meta.py index 5b845d81a6..1d994a60d6 100644 --- a/tools/uec/meta.py +++ b/tools/uec/meta.py @@ -1,10 +1,23 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import BaseHTTPServer +import SimpleHTTPServer import sys -from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler -from SimpleHTTPServer import SimpleHTTPRequestHandler -def main(host, port, HandlerClass = SimpleHTTPRequestHandler, - ServerClass = HTTPServer, protocol="HTTP/1.0"): - """simple http server that listens on a give address:port""" + +def main(host, port, HandlerClass=SimpleHTTPServer.SimpleHTTPRequestHandler, + ServerClass=BaseHTTPServer.HTTPServer, protocol="HTTP/1.0"): + """simple http server that listens on a give address:port.""" server_address = (host, port) @@ -12,7 +25,7 @@ def main(host, port, HandlerClass = SimpleHTTPRequestHandler, httpd = ServerClass(server_address, HandlerClass) sa = httpd.socket.getsockname() - print "Serving HTTP on", sa[0], "port", sa[1], "..." 
+ print("Serving HTTP on", sa[0], "port", sa[1], "...") httpd.serve_forever() if __name__ == '__main__': From 9b3602ccf64f1d690a0a3d4adff987a5a12594b1 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Fri, 28 Feb 2014 13:52:29 -0500 Subject: [PATCH 0216/4119] Fix intermittent error in exercises/floating_ips.sh Every once in a while I see this error running floating_ips.sh: /devstack/exercises/floating_ips.sh:184:ping_check /devstack/functions:356:_ping_check_neutron /devstack/lib/neutron:904:die [ERROR] /devstack/lib/neutron:904 [Fail] Could ping server I think the problem is that it immediately tries to ping right after the icmp rule is deleted. Add a timeout and check so we at least wait one second. Change-Id: I753ec257fa12f6d2ddff1a5b1909e32d8995e173 --- exercises/floating_ips.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 8dc44effbc..8b7b96197e 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -178,6 +178,10 @@ fi nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \ die $LINENO "Failure deleting security group rule from $SECGROUP" +if ! 
timeout $ASSOCIATE_TIMEOUT sh -c "while nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then + die $LINENO "Security group rule not deleted from $SECGROUP" +fi + # FIXME (anthony): make xs support security groups if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then # Test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds From 729236ca1a38804b3c31ec39ef65592e0108f863 Mon Sep 17 00:00:00 2001 From: Mohammad Banikazemi Date: Wed, 5 Feb 2014 14:45:04 -0500 Subject: [PATCH 0217/4119] Adds support for IBM SDN-VE Neutron plugin This provides the support for the monolithic plugin for IBM SDN-VE that is being added to Neutron here: https://review.openstack.org/#/c/66453/ Implements: blueprint ibm-sdnve-plugin-support Depends-On: I92619a95bca2ae0c37e7fdd39da30119b43d1ad6 DocImpact Change-Id: I0958457355036fdab93156cd7fb4afd1a458918b --- lib/neutron_plugins/ibm | 133 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 133 insertions(+) create mode 100644 lib/neutron_plugins/ibm diff --git a/lib/neutron_plugins/ibm b/lib/neutron_plugins/ibm new file mode 100644 index 0000000000..22c8578e64 --- /dev/null +++ b/lib/neutron_plugins/ibm @@ -0,0 +1,133 @@ +# Neutron IBM SDN-VE plugin +# --------------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/neutron_plugins/ovs_base + +function neutron_plugin_install_agent_packages { + _neutron_ovs_base_install_agent_packages +} + +function _neutron_interface_setup { + # Setup one interface on the integration bridge if needed + # The plugin agent to be used if more than one interface is used + local bridge=$1 + local interface=$2 + sudo ovs-vsctl --no-wait -- --may-exist add-port $bridge $interface +} + +function neutron_setup_integration_bridge { + # Setup integration bridge if needed + if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then + neutron_ovs_base_cleanup + _neutron_ovs_base_setup_bridge 
$SDNVE_INTEGRATION_BRIDGE + if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then + interfaces=(${SDNVE_INTERFACE_MAPPINGS//[,:]/ }) + _neutron_interface_setup $SDNVE_INTEGRATION_BRIDGE ${interfaces[1]} + fi + fi + + # Set controller to SDNVE controller (1st of list) if exists + if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then + # Get the first controller + controllers=(${SDNVE_CONTROLLER_IPS//[\[,\]]/ }) + SDNVE_IP=${controllers[0]} + sudo ovs-vsctl set-controller $SDNVE_INTEGRATION_BRIDGE tcp:$SDNVE_IP + fi +} + +function neutron_plugin_create_nova_conf { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + # if n-cpu is enabled, then setup integration bridge + if is_service_enabled n-cpu; then + neutron_setup_integration_bridge + fi +} + +function is_neutron_ovs_base_plugin { + if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then + # Yes, we use OVS. + return 0 + else + # No, we do not use OVS. + return 1 + fi +} + +function neutron_plugin_configure_common { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ibm + Q_PLUGIN_CONF_FILENAME=sdnve_neutron_plugin.ini + Q_DB_NAME="sdnve_neutron" + Q_PLUGIN_CLASS="neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2" +} + +function neutron_plugin_configure_service { + # Define extra "SDNVE" configuration options when q-svc is configured + + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver + + if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE sdnve controller_ips $SDNVE_CONTROLLER_IPS + fi + + if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE sdnve integration_bridge $SDNVE_INTEGRATION_BRIDGE + fi + + if [[ "$SDNVE_RESET_BRIDGE" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE sdnve reset_bridge $SDNVE_RESET_BRIDGE + fi + + if [[ "$SDNVE_OUT_OF_BAND" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE sdnve out_of_band $SDNVE_OUT_OF_BAND + fi + + if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then + iniset 
/$Q_PLUGIN_CONF_FILE sdnve interface_mappings $SDNVE_INTERFACE_MAPPINGS + fi + + if [[ "$SDNVE_FAKE_CONTROLLER" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE sdnve use_fake_controller $SDNVE_FAKE_CONTROLLER + fi + + + iniset $NEUTRON_CONF DEFAULT notification_driver neutron.openstack.common.notifier.no_op_notifier + +} + +function neutron_plugin_configure_plugin_agent { + AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ibm-agent" +} + +function neutron_plugin_configure_debug_command { + : +} + +function neutron_plugin_setup_interface_driver { + return 0 +} + +function has_neutron_plugin_security_group { + # Does not support Security Groups + return 1 +} + +function neutron_ovs_base_cleanup { + if [[ "$SDNVE_RESET_BRIDGE" != False ]]; then + # remove all OVS ports that look like Neutron created ports + for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do + sudo ovs-vsctl del-port ${port} + done + + # remove integration bridge created by Neutron + for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${SDNVE_INTEGRATION_BRIDGE}); do + sudo ovs-vsctl del-br ${bridge} + done + fi +} + +# Restore xtrace +$MY_XTRACE From 91baef3e26994c64249453dd0b1d8998eda10eca Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 28 Feb 2014 11:11:45 -0600 Subject: [PATCH 0218/4119] Clarify deprecation of EXTRA_xxx_OPTS The various EXTRA_xxx_OPTS variables will be removed in the Juno development cycle, change the README to reflect the new way for the Neutron variables. Change-Id: Ic84da4a9b5a83e66cf0b57d643a87691f15517f0 --- README.md | 50 ++++++++++++++++++++++++++++++++++---------------- stack.sh | 48 +++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 79 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index 9914b1ed69..9304240f70 100644 --- a/README.md +++ b/README.md @@ -163,7 +163,7 @@ services are started in background and managed by `swift-init` tool. 
Basic Setup In order to enable Neutron a single node setup, you'll need the -following settings in your `localrc` section: +following settings in your `local.conf`: disable_service n-net enable_service q-svc @@ -172,7 +172,6 @@ following settings in your `localrc` section: enable_service q-l3 enable_service q-meta enable_service q-metering - enable_service neutron # Optional, to enable tempest configuration as part of DevStack enable_service tempest @@ -180,24 +179,44 @@ Then run `stack.sh` as normal. DevStack supports setting specific Neutron configuration flags to the service, Open vSwitch plugin and LinuxBridge plugin configuration files. -To make use of this feature, the following variables are defined and can -be configured in your `localrc` section: +To make use of this feature, the settings can be added to ``local.conf``. +The old ``Q_XXX_EXTRA_XXX_OPTS`` variables are deprecated and will be removed +in the near future. The ``local.conf`` headers for the replacements are: - Variable Name Config File Section Modified - ------------------------------------------------------------------------------------- - Q_SRV_EXTRA_OPTS Plugin `OVS` (for Open Vswitch) or `LINUX_BRIDGE` (for LinuxBridge) - Q_AGENT_EXTRA_AGENT_OPTS Plugin AGENT - Q_AGENT_EXTRA_SRV_OPTS Plugin `OVS` (for Open Vswitch) or `LINUX_BRIDGE` (for LinuxBridge) - Q_SRV_EXTRA_DEFAULT_OPTS Service DEFAULT +* ``Q_SRV_EXTRA_OPTS``: + + [[post-config|/$Q_PLUGIN_CONF_FILE]] + [linuxbridge] # or [ovs] + +* ``Q_AGENT_EXTRA_AGENT_OPTS``: + + [[post-config|/$Q_PLUGIN_CONF_FILE]] + [agent] + +* ``Q_AGENT_EXTRA_SRV_OPTS``: -An example of using the variables in your `localrc` section is below: + [[post-config|/$Q_PLUGIN_CONF_FILE]] + [linuxbridge] # or [ovs] + +* ``Q_SRV_EXTRA_DEFAULT_OPTS``: + + [[post-config|$NEUTRON_CONF]] + [DEFAULT] - Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_type=vxlan vxlan_udp_port=8472) - Q_SRV_EXTRA_OPTS=(tenant_network_type=vxlan) +Example extra config in `local.conf`: + + 
[[post-config|/$Q_PLUGIN_CONF_FILE]] + [agent] + tunnel_type=vxlan + vxlan_udp_port=8472 + + [[post-config|$NEUTRON_CONF]] + [DEFAULT] + tenant_network_type=vxlan DevStack also supports configuring the Neutron ML2 plugin. The ML2 plugin -can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. A -simple way to configure the ml2 plugin is shown below: +can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. This +is a simple way to configure the ml2 plugin: # VLAN configuration Q_PLUGIN=ml2 @@ -223,7 +242,6 @@ To change this, set the `Q_AGENT` variable to the agent you want to run Q_ML2_PLUGIN_GRE_TYPE_OPTIONS GRE TypeDriver options. Defaults to none. Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS VXLAN TypeDriver options. Defaults to none. Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS VLAN TypeDriver options. Defaults to none. - Q_AGENT_EXTRA_AGENT_OPTS Extra configuration options to pass to the OVS or LinuxBridge Agent. # Heat diff --git a/stack.sh b/stack.sh index ccd567e0bc..988fda5ff1 100755 --- a/stack.sh +++ b/stack.sh @@ -1359,12 +1359,14 @@ if [[ -n "$DEPRECATED_TEXT" ]]; then echo_summary "WARNING: $DEPRECATED_TEXT" fi +# TODO(dtroyer): Remove EXTRA_OPTS after stable/icehouse branch is cut # Specific warning for deprecated configs if [[ -n "$EXTRA_OPTS" ]]; then echo "" echo_summary "WARNING: EXTRA_OPTS is used" echo "You are using EXTRA_OPTS to pass configuration into nova.conf." echo "Please convert that configuration in localrc to a nova.conf section in local.conf:" + echo "EXTRA_OPTS will be removed early in the Juno development cycle" echo " [[post-config|\$NOVA_CONF]] [DEFAULT] @@ -1375,11 +1377,13 @@ if [[ -n "$EXTRA_OPTS" ]]; then done fi +# TODO(dtroyer): Remove EXTRA_BAREMETAL_OPTS after stable/icehouse branch is cut if [[ -n "$EXTRA_BAREMETAL_OPTS" ]]; then echo "" - echo_summary "WARNING: EXTRA_OPTS is used" - echo "You are using EXTRA_OPTS to pass configuration into nova.conf." 
+ echo_summary "WARNING: EXTRA_BAREMETAL_OPTS is used" + echo "You are using EXTRA_BAREMETAL_OPTS to pass configuration into nova.conf." echo "Please convert that configuration in localrc to a nova.conf section in local.conf:" + echo "EXTRA_BAREMETAL_OPTS will be removed early in the Juno development cycle" echo " [[post-config|\$NOVA_CONF]] [baremetal] @@ -1390,13 +1394,49 @@ if [[ -n "$EXTRA_BAREMETAL_OPTS" ]]; then done fi +# TODO(dtroyer): Remove Q_AGENT_EXTRA_AGENT_OPTS after stable/juno branch is cut +if [[ -n "$Q_AGENT_EXTRA_AGENT_OPTS" ]]; then + echo "" + echo_summary "WARNING: Q_AGENT_EXTRA_AGENT_OPTS is used" + echo "You are using Q_AGENT_EXTRA_AGENT_OPTS to pass configuration into $NEUTRON_CONF." + echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:" + echo "Q_AGENT_EXTRA_AGENT_OPTS will be removed early in the 'K' development cycle" + echo " +[[post-config|/\$Q_PLUGIN_CONF_FILE]] +[DEFAULT] +" + for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + echo ${I} + done +fi + +# TODO(dtroyer): Remove Q_AGENT_EXTRA_SRV_OPTS after stable/juno branch is cut +if [[ -n "$Q_AGENT_EXTRA_SRV_OPTS" ]]; then + echo "" + echo_summary "WARNING: Q_AGENT_EXTRA_SRV_OPTS is used" + echo "You are using Q_AGENT_EXTRA_SRV_OPTS to pass configuration into $NEUTRON_CONF." 
+ echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:" + echo "Q_AGENT_EXTRA_AGENT_OPTS will be removed early in the 'K' development cycle" + echo " +[[post-config|/\$Q_PLUGIN_CONF_FILE]] +[DEFAULT] +" + for I in "${Q_AGENT_EXTRA_SRV_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + echo ${I} + done +fi + +# TODO(dtroyer): Remove Q_DHCP_EXTRA_DEFAULT_OPTS after stable/icehouse branch is cut if [[ -n "$Q_DHCP_EXTRA_DEFAULT_OPTS" ]]; then echo "" echo_summary "WARNING: Q_DHCP_EXTRA_DEFAULT_OPTS is used" echo "You are using Q_DHCP_EXTRA_DEFAULT_OPTS to pass configuration into $Q_DHCP_CONF_FILE." echo "Please convert that configuration in localrc to a $Q_DHCP_CONF_FILE section in local.conf:" + echo "Q_DHCP_EXTRA_DEFAULT_OPTS will be removed early in the Juno development cycle" echo " -[[post-config|\$Q_DHCP_CONF_FILE]] +[[post-config|/\$Q_DHCP_CONF_FILE]] [DEFAULT] " for I in "${Q_DHCP_EXTRA_DEFAULT_OPTS[@]}"; do @@ -1405,11 +1445,13 @@ if [[ -n "$Q_DHCP_EXTRA_DEFAULT_OPTS" ]]; then done fi +# TODO(dtroyer): Remove Q_SRV_EXTRA_DEFAULT_OPTS after stable/icehouse branch is cut if [[ -n "$Q_SRV_EXTRA_DEFAULT_OPTS" ]]; then echo "" echo_summary "WARNING: Q_SRV_EXTRA_DEFAULT_OPTS is used" echo "You are using Q_SRV_EXTRA_DEFAULT_OPTS to pass configuration into $NEUTRON_CONF." echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:" + echo "Q_SRV_EXTRA_DEFAULT_OPTS will be removed early in the Juno development cycle" echo " [[post-config|\$NEUTRON_CONF]] [DEFAULT] From 57d478d87438912e1a33d4a2d00d4a300148e2fc Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Fri, 28 Feb 2014 16:37:43 +0000 Subject: [PATCH 0219/4119] Move heat keystone setup into lib/heat Move the heat setup which currently happens in files/keystone_data.sh to lib/heat, where we have create_heat_accounts. 
Move the user, role, service and endpoint creation as that is consistent with what other services, e.g lib/nova are doing. Change-Id: Iaa2c822cad581d6b2b4f22f8863daf81e25f8485 --- files/keystone_data.sh | 35 ---------------------------------- lib/heat | 43 +++++++++++++++++++++++++++++++++++++++++- stack.sh | 3 +-- 3 files changed, 43 insertions(+), 38 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 9a34c7616f..fc1e8136a4 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -53,41 +53,6 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" | --role ResellerAdmin fi -# Heat -if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then - keystone user-create --name=heat \ - --pass="$SERVICE_PASSWORD" \ - --tenant $SERVICE_TENANT_NAME \ - --email=heat@example.com - keystone user-role-add --tenant $SERVICE_TENANT_NAME \ - --user heat \ - --role service - # heat_stack_user role is for users created by Heat - keystone role-create --name heat_stack_user - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - keystone service-create \ - --name=heat-cfn \ - --type=cloudformation \ - --description="Heat CloudFormation Service" - keystone endpoint-create \ - --region RegionOne \ - --service heat-cfn \ - --publicurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \ - --adminurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \ - --internalurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" - keystone service-create \ - --name=heat \ - --type=orchestration \ - --description="Heat Service" - keystone endpoint-create \ - --region RegionOne \ - --service heat \ - --publicurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ - --internalurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" - fi -fi - # Glance if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then keystone user-create \ diff --git a/lib/heat b/lib/heat index d0c0302016..42d1057cbd 100644 --- 
a/lib/heat +++ b/lib/heat @@ -197,8 +197,49 @@ function disk_image_create { } # create_heat_accounts() - Set up common required heat accounts -# Note this is in addition to what is in files/keystone_data.sh function create_heat_accounts { + # migrated from files/keystone_data.sh + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") + + HEAT_USER=$(openstack user create \ + heat \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email heat@example.com \ + | grep " id " | get_field 2) + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $HEAT_USER + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + HEAT_SERVICE=$(openstack service create \ + heat \ + --type=orchestration \ + --description="Heat Orchestration Service" \ + | grep " id " | get_field 2) + openstack endpoint create \ + $HEAT_SERVICE \ + --region RegionOne \ + --publicurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ + --adminurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ + --internalurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" + HEAT_CFN_SERVICE=$(openstack service create \ + heat \ + --type=cloudformation \ + --description="Heat CloudFormation Service" \ + | grep " id " | get_field 2) + openstack endpoint create \ + $HEAT_CFN_SERVICE \ + --region RegionOne \ + --publicurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \ + --adminurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \ + --internalurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" + fi + + # heat_stack_user role is for users created by Heat + openstack role create heat_stack_user + # Note we have to pass token/endpoint here because the current endpoint and # version negotiation in OSC means just --os-identity-api-version=3 won't work 
KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" diff --git a/stack.sh b/stack.sh index ccd567e0bc..ec8de2d2dd 100755 --- a/stack.sh +++ b/stack.sh @@ -934,8 +934,7 @@ if is_service_enabled key; then ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \ - DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_CFN_PORT=$HEAT_API_CFN_PORT \ - HEAT_API_PORT=$HEAT_API_PORT \ + DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \ bash -x $FILES/keystone_data.sh # Set up auth creds now that keystone is bootstrapped From 2ca3bf18dd756621f012ebb7ffb338f2fa38d6f2 Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Mon, 3 Mar 2014 18:07:33 +0000 Subject: [PATCH 0220/4119] Add heat_stack_owner role for heat trusts usage Heat supports deferred operations via keystone trusts, and we'd like to make that the default. To do this, we require a new role, which is the default role specified in heat.conf trusts_delegated_roles, heat_stack_owner. Add the role to the admin/demo users so they can create heat stacks when we make deferred_auth_method=trusts the default. 
Change-Id: Idfc70ee89428c23f5965e643486ff2ad9566471c Related-Bug: #1286157 --- lib/heat | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/lib/heat b/lib/heat index 42d1057cbd..2d9d863f0c 100644 --- a/lib/heat +++ b/lib/heat @@ -240,6 +240,19 @@ function create_heat_accounts { # heat_stack_user role is for users created by Heat openstack role create heat_stack_user + # heat_stack_owner role is given to users who create Heat stacks, + # it's the default role used by heat to delegate to the heat service + # user (for performing deferred operations via trusts), see heat.conf + HEAT_OWNER_ROLE=$(openstack role create \ + heat_stack_owner \ + | grep " id " | get_field 2) + + # Give the role to the demo and admin users so they can create stacks + # in either of the projects created by devstack + openstack role add $HEAT_OWNER_ROLE --project demo --user demo + openstack role add $HEAT_OWNER_ROLE --project demo --user admin + openstack role add $HEAT_OWNER_ROLE --project admin --user admin + # Note we have to pass token/endpoint here because the current endpoint and # version negotiation in OSC means just --os-identity-api-version=3 won't work KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" From de3b82037d863b55cc245c343a8697b5cf4b1904 Mon Sep 17 00:00:00 2001 From: Shweta P Date: Mon, 3 Mar 2014 13:38:37 -0500 Subject: [PATCH 0221/4119] NCCLIENT_REPO is using the wrong url NCCLIENT_REPO value in lib/neutron_plugins/cisco is pointing to a repo that does not exist. This fix corrects the url. 
Closes-Bug #1286302 Change-Id: I42db0b3f7a4bbf5d1d053e3da8b4fbb67d47de94 --- lib/neutron_plugins/cisco | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco index 7728eb177f..a1b089e1a3 100644 --- a/lib/neutron_plugins/cisco +++ b/lib/neutron_plugins/cisco @@ -23,7 +23,7 @@ Q_CISCO_PLUGIN_VLAN_RANGES=${Q_CISCO_PLUGIN_VLAN_RANGES:-vlan:1:4094} # Specify ncclient package information NCCLIENT_DIR=$DEST/ncclient NCCLIENT_VERSION=${NCCLIENT_VERSION:-0.3.1} -NCCLIENT_REPO=${NCCLIENT_REPO:-${GIT_BASE}/CiscoSystems/ncclient.git} +NCCLIENT_REPO=${NCCLIENT_REPO:-git://github.com/CiscoSystems/ncclient.git} NCCLIENT_BRANCH=${NCCLIENT_BRANCH:-master} # This routine put a prefix on an existing function name From 753afeba7464464a3fd050eb2085e51580f9b5a7 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Thu, 13 Feb 2014 17:17:30 -0800 Subject: [PATCH 0222/4119] Use neutron security groups in BigSwitch plugin Configures the Big Switch third-party plugin to use neutron security groups instead of nova security groups. 
Change-Id: I6bc3046ff0e70b8288a7c3f3d6f975376adc081a Implements: blueprint bigswitch-neutron-security --- lib/neutron_plugins/bigswitch_floodlight | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index 4cb0da84ea..b1b77d7606 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -38,7 +38,12 @@ function neutron_plugin_configure_l3_agent { } function neutron_plugin_configure_plugin_agent { - : + # Set up integration bridge + _neutron_ovs_base_setup_bridge $OVS_BRIDGE + iniset /$Q_PLUGIN_CONF_FILE restproxyagent integration_bridge $OVS_BRIDGE + AGENT_BINARY="$NEUTRON_DIR/neutron/plugins/bigswitch/agent/restproxy_agent.py" + + _neutron_ovs_base_configure_firewall_driver } function neutron_plugin_configure_service { @@ -61,7 +66,7 @@ function neutron_plugin_setup_interface_driver { function has_neutron_plugin_security_group { # 1 means False here - return 1 + return 0 } function neutron_plugin_check_adv_test_requirements { From 8829acaf141ade6d5ac61ec3d0b15d80e3a09752 Mon Sep 17 00:00:00 2001 From: zhang-jinnan Date: Mon, 3 Mar 2014 10:55:33 +0800 Subject: [PATCH 0223/4119] Remove blank space after print Keep code clean and pleasure:) Change-Id: Ie0c0781eaeb57b32a9a6185a59353fc4b911afd6 --- tools/jenkins/jenkins_home/print_summary.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/jenkins/jenkins_home/print_summary.py b/tools/jenkins/jenkins_home/print_summary.py index ea943e1caf..6310b1889f 100755 --- a/tools/jenkins/jenkins_home/print_summary.py +++ b/tools/jenkins/jenkins_home/print_summary.py @@ -5,8 +5,8 @@ def print_usage(): - print ("Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]" - % sys.argv[0]) + print("Usage: %s [jenkins_url (eg. 
http://50.56.12.202:8080/)]" + % sys.argv[0]) sys.exit() From ccf60f75a2a5a0f10412b4f806ac7a123068909b Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Mon, 3 Mar 2014 22:48:31 -0500 Subject: [PATCH 0224/4119] Put tempest ipv6 option in the correct group This commit updates the location for the ipv6 option to be in the proper group. This depends on tempest change I35769cf4d18363fad56ed5150b4d01d8a5ad17e7 Change-Id: Ief5ea00649c8954282245e30c63c45557a28ea9f --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 16f8744d85..1639ae60b4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -293,7 +293,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG network public_network_id "$public_network_id" iniset $TEMPEST_CONFIG network public_router_id "$public_router_id" iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE" - iniset $TEMPEST_CONFIG network ipv6_enabled "$IPV6_ENABLED" + iniset $TEMPEST_CONFIG network-feature-enabled ipv6 "$IPV6_ENABLED" # boto iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" From 314af0a7a97b31ff2a803a77e1a92f5b67857f18 Mon Sep 17 00:00:00 2001 From: Sreeram Yerrapragada Date: Mon, 3 Mar 2014 21:34:45 -0800 Subject: [PATCH 0225/4119] Fix upload function for vmdk files Fix all grep statements failing under -o errexit. Change-Id: I0591a2ba7351d598eb5b29d68a83ce6290600938 --- functions | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/functions b/functions index a844b1c1af..ab8319b0ce 100644 --- a/functions +++ b/functions @@ -55,7 +55,7 @@ function upload_image { mkdir -p $FILES/images IMAGE_FNAME=`basename "$image_url"` if [[ $image_url != file* ]]; then - # Downloads the image (uec ami+aki style), then extracts it. + # Downloads the image (uec ami+akistyle), then extracts it. if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then wget -c $image_url -O $FILES/$IMAGE_FNAME if [[ $? 
-ne 0 ]]; then @@ -103,12 +103,12 @@ function upload_image { vmdk_net_adapter="" # vmdk adapter type - vmdk_adapter_type="$(head -25 $IMAGE | grep -a -F -m 1 'ddb.adapterType =' $IMAGE)" + vmdk_adapter_type="$(head -25 $IMAGE | { grep -a -F -m 1 'ddb.adapterType =' $IMAGE || true; })" vmdk_adapter_type="${vmdk_adapter_type#*\"}" vmdk_adapter_type="${vmdk_adapter_type%?}" # vmdk disk type - vmdk_create_type="$(head -25 $IMAGE | grep -a -F -m 1 'createType=' $IMAGE)" + vmdk_create_type="$(head -25 $IMAGE | { grep -a -F -m 1 'createType=' $IMAGE || true; })" vmdk_create_type="${vmdk_create_type#*\"}" vmdk_create_type="${vmdk_create_type%\"*}" @@ -119,7 +119,7 @@ function upload_image { elif [[ "$vmdk_create_type" = "monolithicFlat" || \ "$vmdk_create_type" = "vmfs" ]]; then # Attempt to retrieve the *-flat.vmdk - flat_fname="$(head -25 $IMAGE | grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE)" + flat_fname="$(head -25 $IMAGE | { grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE || true; })" flat_fname="${flat_fname#*\"}" flat_fname="${flat_fname%?}" if [[ -z "$flat_name" ]]; then @@ -190,7 +190,7 @@ function upload_image { fi if $descriptor_found; then vmdk_adapter_type="$(head -25 $descriptor_url |"` - `"grep -a -F -m 1 'ddb.adapterType =' $descriptor_url)" + `" { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })" vmdk_adapter_type="${vmdk_adapter_type#*\"}" vmdk_adapter_type="${vmdk_adapter_type%?}" fi @@ -203,7 +203,7 @@ function upload_image { # NOTE: For backwards compatibility reasons, colons may be used in place # of semi-colons for property delimiters but they are not permitted # characters in NTFS filesystems. 
- property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$'` + property_string=`echo "$IMAGE_NAME" | { grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$' || true; }` IFS=':;' read -a props <<< "$property_string" vmdk_disktype="${props[0]:-$vmdk_disktype}" vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}" From a439faa85b89b0d2c73085743426fd8741293cb6 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Mon, 24 Feb 2014 20:32:19 +0900 Subject: [PATCH 0226/4119] Update required packages for ryu Sync with the recent reality. Change-Id: I4c37d09e511f3763d2267267815387bd5c825e0e Closes-Bug: 1287541 --- files/apts/ryu | 4 +--- files/rpms-suse/ryu | 4 +--- files/rpms/ryu | 4 +--- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/files/apts/ryu b/files/apts/ryu index e8ed926c1e..9b850807e6 100644 --- a/files/apts/ryu +++ b/files/apts/ryu @@ -1,4 +1,2 @@ -python-gevent -python-gflags -python-netifaces +python-eventlet python-sphinx diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu index 3797b6cb44..6b426fb163 100644 --- a/files/rpms-suse/ryu +++ b/files/rpms-suse/ryu @@ -1,4 +1,2 @@ python-Sphinx -python-gevent -python-netifaces -python-python-gflags +python-eventlet diff --git a/files/rpms/ryu b/files/rpms/ryu index e8ed926c1e..9b850807e6 100644 --- a/files/rpms/ryu +++ b/files/rpms/ryu @@ -1,4 +1,2 @@ -python-gevent -python-gflags -python-netifaces +python-eventlet python-sphinx From 0e598c3c81fc3d652415095101a095de69ec8a6d Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Mon, 24 Feb 2014 22:02:08 +0900 Subject: [PATCH 0227/4119] Stop running setup_devel for Ryu It doesn't work here for various reasons. - Ryu's setup.py is incompatible with global requirements - This code is called before install_infra. Ryu is not a part of OpenStack anyway. 
Closes-Bug: 1287569 Change-Id: I01a942411f7d06bdf8f1fec5d1a0bc319560f329 --- lib/neutron_thirdparty/ryu | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu index 424a90041e..b2c1b613fe 100644 --- a/lib/neutron_thirdparty/ryu +++ b/lib/neutron_thirdparty/ryu @@ -18,14 +18,8 @@ RYU_OFP_PORT=${RYU_OFP_PORT:-6633} # Ryu Applications RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest} -# configure_ryu can be called multiple times as neutron_pluing/ryu may call -# this function for neutron-ryu-agent -_RYU_CONFIGURED=${_RYU_CONFIGURED:-False} function configure_ryu { - if [[ "$_RYU_CONFIGURED" == "False" ]]; then - setup_develop $RYU_DIR - _RYU_CONFIGURED=True - fi + : } function init_ryu { @@ -63,6 +57,7 @@ _RYU_INSTALLED=${_RYU_INSTALLED:-False} function install_ryu { if [[ "$_RYU_INSTALLED" == "False" ]]; then git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH + export PYTHONPATH=$RYU_DIR:$PYTHONPATH _RYU_INSTALLED=True fi } From d5b52ca7557ec1aef71f21c71110455a6aea2505 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 4 Mar 2014 09:23:07 -0500 Subject: [PATCH 0228/4119] fix tgt to use 'service' instead of upstart calls the comments in here were largely about oneric, which we don't support any more. service is installed in a precise environment, and will support debian and the upcoming transition to systemd better, so use that instead. 
Change-Id: If15493549a8c93a7387df9b3bba31443aed46995 --- lib/cinder | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/cinder b/lib/cinder index d003f5dc7b..dd2956a5b4 100644 --- a/lib/cinder +++ b/lib/cinder @@ -491,10 +491,7 @@ function start_cinder { sudo rm -f /etc/tgt/conf.d/stack.conf _configure_tgt_for_config_d if is_ubuntu; then - # tgt in oneiric doesn't restart properly if tgtd isn't running - # do it in two steps - sudo stop tgt || true - sudo start tgt + sudo service tgt restart elif is_fedora; then if [[ $DISTRO =~ (rhel6) ]]; then sudo /sbin/service tgtd restart From a67cb1af4df6b5c758c319e0590a3188d951e68d Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Tue, 4 Mar 2014 18:38:33 +0400 Subject: [PATCH 0229/4119] Fix typo in ironic configure function IRONIC_CONF should be replaced by IRONIC_CONF_FILE Change-Id: Ie43e376f42f14c46d21df7dbb19db923521f438b --- lib/ironic | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ironic b/lib/ironic index 4e5edc90cf..b346de1e69 100644 --- a/lib/ironic +++ b/lib/ironic @@ -124,7 +124,7 @@ function configure_ironic_conductor { cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR - iniset $IRONIC_CONF DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF + iniset $IRONIC_CONF_FILE DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF } # create_ironic_cache_dir() - Part of the init_ironic() process From 3d2bdf50bc0110c718de39606c8b803696a31285 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Sat, 1 Mar 2014 00:17:32 -0500 Subject: [PATCH 0230/4119] Use cat instead of read Date: Tue, 4 Mar 2014 15:02:04 -0500 Subject: [PATCH 0231/4119] fix typo in lib/ceilometer this should be is_service_enabled and not service_enabled. Not sure why it passes in the gate, but it fails in stackforge jobs. 
Change-Id: I876f72cd98ff9c8e4ea28832bc9ac6bbdc3b865d --- lib/ceilometer | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ceilometer b/lib/ceilometer index 2e6e7c5a76..04c1a34b8b 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -209,7 +209,7 @@ function start_ceilometer { screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" # only die on API if it was actually intended to be turned on - if service_enabled ceilometer-api; then + if is_service_enabled ceilometer-api; then echo "Waiting for ceilometer-api to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then die $LINENO "ceilometer-api did not start" From e2aa91b237e7e23f70847cba60a54a40560a5a3c Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Tue, 4 Mar 2014 04:40:19 -0500 Subject: [PATCH 0232/4119] Enable marconi-server to run when USE_SCREEN=false This patch, 1. adds log_file option to marconi.conf 2. redirects the output from marconi-server, in the same precedent set by another project. 
Change-Id: Ib273a03625d5a4edf8bb3ed7d522d2b087975acd --- lib/marconi | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/marconi b/lib/marconi index 29ae386d9f..a96137fc04 100644 --- a/lib/marconi +++ b/lib/marconi @@ -34,7 +34,8 @@ MARCONI_DIR=$DEST/marconi MARCONICLIENT_DIR=$DEST/python-marconiclient MARCONI_CONF_DIR=/etc/marconi MARCONI_CONF=$MARCONI_CONF_DIR/marconi.conf -MARCONI_API_LOG_DIR=/var/log/marconi-api +MARCONI_API_LOG_DIR=/var/log/marconi +MARCONI_API_LOG_FILE=$MARCONI_API_LOG_DIR/queues.log MARCONI_AUTH_CACHE_DIR=${MARCONI_AUTH_CACHE_DIR:-/var/cache/marconi} # Support potential entry-points console scripts @@ -96,6 +97,7 @@ function configure_marconi { iniset $MARCONI_CONF DEFAULT verbose True iniset $MARCONI_CONF DEFAULT use_syslog $SYSLOG + iniset $MARCONI_CONF DEFAULT log_file $MARCONI_API_LOG_FILE iniset $MARCONI_CONF 'drivers:transport:wsgi' bind $MARCONI_SERVICE_HOST iniset $MARCONI_CONF keystone_authtoken auth_protocol http @@ -148,7 +150,7 @@ function install_marconiclient { # start_marconi() - Start running processes, including screen function start_marconi { - screen_it marconi-server "marconi-server --config-file $MARCONI_CONF" + screen_it marconi-server "marconi-server --config-file $MARCONI_CONF 2>&1" echo "Waiting for Marconi to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then die $LINENO "Marconi did not start" From ae1728917373986b68d2b0abe2e7052fb78e5903 Mon Sep 17 00:00:00 2001 From: ronak Date: Tue, 4 Mar 2014 15:48:22 -0800 Subject: [PATCH 0233/4119] Supporting Nuage Networks' Plugin through devstack Nuage networks' plugin specific configuration setting file for devstack Change-Id: I936f87b8fbc6f90130514b2fc0d111eab861da7c Implements: blueprint nuage-networks-plugin --- lib/neutron_plugins/nuage | 69 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 lib/neutron_plugins/nuage diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage new file mode 100644 index 0000000000..3649f39bfd --- /dev/null +++ b/lib/neutron_plugins/nuage @@ -0,0 +1,69 @@ +# Nuage Neutron Plugin +# ---------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +function neutron_plugin_create_nova_conf { + NOVA_OVS_BRIDGE=${NOVA_OVS_BRIDGE:-"br-int"} + iniset $NOVA_CONF DEFAULT neutron_ovs_bridge $NOVA_OVS_BRIDGE + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver + iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER +} + +function neutron_plugin_install_agent_packages { + : +} + +function neutron_plugin_configure_common { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nuage + Q_PLUGIN_CONF_FILENAME=nuage_plugin.ini + Q_DB_NAME="nuage_neutron" + Q_PLUGIN_CLASS="neutron.plugins.nuage.plugin.NuagePlugin" + Q_PLUGIN_EXTENSIONS_PATH=neutron/plugins/nuage/extensions + #Nuage specific Neutron defaults. 
Actual value must be set and sourced + NUAGE_CNA_SERVERS=${NUAGE_CNA_SERVERS:-'localhost:8443'} + NUAGE_CNA_SERVER_AUTH=${NUAGE_CNA_SERVER_AUTH:-'username:password'} + NUAGE_CNA_ORGANIZATION=${NUAGE_CNA_ORGANIZATION:-'org'} + NUAGE_CNA_SERVER_SSL=${NUAGE_CNA_SERVER_SSL:-'True'} + NUAGE_CNA_BASE_URI=${NUAGE_CNA_BASE_URI:-'/'} + NUAGE_CNA_AUTH_RESOURCE=${NUAGE_CNA_AUTH_RESOURCE:-'/'} + NUAGE_CNA_DEF_NETPART_NAME=${NUAGE_CNA_DEF_NETPART_NAME:-''} +} + +function neutron_plugin_configure_debug_command { + : +} + +function neutron_plugin_configure_dhcp_agent { + : +} + +function neutron_plugin_configure_l3_agent { + : +} + +function neutron_plugin_configure_plugin_agent { + : +} + +function neutron_plugin_configure_service { + iniset $NEUTRON_CONF DEFAULT api_extensions_path neutron/plugins/nuage/extensions/ + iniset /$Q_PLUGIN_CONF_FILE restproxy base_uri $NUAGE_CNA_BASE_URI + iniset /$Q_PLUGIN_CONF_FILE restproxy serverssl $NUAGE_CNA_SERVER_SSL + iniset /$Q_PLUGIN_CONF_FILE restproxy serverauth $NUAGE_CNA_SERVER_AUTH + iniset /$Q_PLUGIN_CONF_FILE restproxy organization $NUAGE_CNA_ORGANIZATION + iniset /$Q_PLUGIN_CONF_FILE restproxy server $NUAGE_CNA_SERVERS + iniset /$Q_PLUGIN_CONF_FILE restproxy auth_resource $NUAGE_CNA_AUTH_RESOURCE + iniset /$Q_PLUGIN_CONF_FILE restproxy default_net_partition_name $NUAGE_CNA_DEF_NETPART_NAME +} + +function has_neutron_plugin_security_group { + # 1 means False here + return 1 +} + +# Restore xtrace +$MY_XTRACE From 8068455a023063b615fc66ee038211a9ae300a81 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 5 Mar 2014 11:50:23 -0600 Subject: [PATCH 0234/4119] Close all logging file descriptors This has lingered for a long time, finally do something about it... 
Change-Id: Ib90408187698d5d4c23ffb0e527011446efc3c7e --- stack.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/stack.sh b/stack.sh index ab1e8fe94d..32dac0f443 100755 --- a/stack.sh +++ b/stack.sh @@ -1419,3 +1419,9 @@ fi # Indicate how long this took to run (bash maintained variable ``SECONDS``) echo_summary "stack.sh completed in $SECONDS seconds." + +# Restore/close logging file descriptors +exec 1>&3 +exec 2>&3 +exec 3>&- +exec 6>&- From 961328fc4622b16135d6d580429dc3e5db01ded5 Mon Sep 17 00:00:00 2001 From: Flavio Percoco Date: Wed, 5 Mar 2014 18:45:56 +0100 Subject: [PATCH 0235/4119] Fix marconi's storage setting for MongoDB The storage driver should be set to mongodb and the driver's uri to the mongodb:// uri. Change-Id: I6193a5d78f6cd7283b4e3b1831978883b9e99b06 --- lib/marconi | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index 29ae386d9f..8f4f3c6bbc 100644 --- a/lib/marconi +++ b/lib/marconi @@ -105,7 +105,8 @@ function configure_marconi { iniset $MARCONI_CONF keystone_authtoken signing_dir $MARCONI_AUTH_CACHE_DIR if [[ "$MARCONI_BACKEND" = 'mongodb' ]]; then - iniset $MARCONI_CONF database connection mongodb://localhost:27017/marconi + iniset $MARCONI_CONF drivers storage mongodb + iniset $MARCONI_CONF 'drivers:storage:mongodb' uri mongodb://localhost:27017/marconi configure_mongodb cleanup_marconi fi From 5fc5b7e231710c2d67522d1bcabdc448dadd0f94 Mon Sep 17 00:00:00 2001 From: Flavio Percoco Date: Wed, 5 Mar 2014 18:49:02 +0100 Subject: [PATCH 0236/4119] Add support for sqlalchemy to Marconi This patch adds a way to setup a marconi instance using sqlalchemy. 
Change-Id: Ia694b76286835ca2ca935814370aa43544fe84fa --- lib/marconi | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index 8f4f3c6bbc..1e0cc7df08 100644 --- a/lib/marconi +++ b/lib/marconi @@ -104,7 +104,10 @@ function configure_marconi { iniset $MARCONI_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $MARCONI_CONF keystone_authtoken signing_dir $MARCONI_AUTH_CACHE_DIR - if [[ "$MARCONI_BACKEND" = 'mongodb' ]]; then + if [ "$MARCONI_BACKEND" = 'mysql' ] || [ "$MARCONI_BACKEND" = 'postgresql' ] ; then + iniset $MARCONI_CONF drivers storage sqlalchemy + iniset $MARCONI_CONF 'drivers:storage:sqlalchemy' uri `database_connection_url marconi` + else iniset $MARCONI_CONF drivers storage mongodb iniset $MARCONI_CONF 'drivers:storage:mongodb' uri mongodb://localhost:27017/marconi configure_mongodb From d46d9dd8de00d07eee9170365b1a025f0fc01ed9 Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Wed, 5 Mar 2014 13:38:19 +0000 Subject: [PATCH 0237/4119] Inject all account details in tempest.conf The tempest configuration function did not inject all account details in tempest.conf. The only reason why it worked, was because tempest uses default config values which are valid for the current devstack setup. To remove this dependency, two patches are needed: - this one in devstack, to inject all values - https://review.openstack.org/#/c/77602/ in tempest, to change default values to None Partially fixes bug 1287191 Change-Id: I01507b142703a1ff66707464b9a743e9d0ca3e01 --- lib/tempest | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 16f8744d85..8455aae170 100644 --- a/lib/tempest +++ b/lib/tempest @@ -149,8 +149,12 @@ function configure_tempest { password=${ADMIN_PASSWORD:-secrete} - # See files/keystone_data.sh where alt_demo user - # and tenant are set up... 
+ # See files/keystone_data.sh and stack.sh where admin, demo and alt_demo + # user and tenant are set up... + ADMIN_USERNAME=${ADMIN_USERNAME:-admin} + ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin} + TEMPEST_USERNAME=${TEMPEST_USERNAME:-demo} + TEMPEST_TENANT_NAME=${TEMPEST_TENANT_NAME:-demo} ALT_USERNAME=${ALT_USERNAME:-alt_demo} ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo} @@ -254,11 +258,15 @@ function configure_tempest { # Identity iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/" iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v3/" + iniset $TEMPEST_CONFIG identity username $TEMPEST_USERNAME iniset $TEMPEST_CONFIG identity password "$password" + iniset $TEMPEST_CONFIG identity tenant_name $TEMPEST_TENANT_NAME iniset $TEMPEST_CONFIG identity alt_username $ALT_USERNAME iniset $TEMPEST_CONFIG identity alt_password "$password" iniset $TEMPEST_CONFIG identity alt_tenant_name $ALT_TENANT_NAME + iniset $TEMPEST_CONFIG identity admin_username $ADMIN_USERNAME iniset $TEMPEST_CONFIG identity admin_password "$password" + iniset $TEMPEST_CONFIG identity admin_tenant_name $ADMIN_TENANT_NAME # Image # for the gate we want to be able to override this variable so we aren't @@ -285,7 +293,9 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method # Compute admin + iniset $TEMPEST_CONFIG "compute-admin" username $USERNAME iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED + iniset $TEMPEST_CONFIG "compute-admin" tenant_name $TENANT_NAME # Network iniset $TEMPEST_CONFIG network api_version 2.0 From 99b622a936c0b6f5b6283f3bcdca3bd7d0628e29 Mon Sep 17 00:00:00 2001 From: Ryan Hsu Date: Wed, 5 Mar 2014 15:35:49 -0800 Subject: [PATCH 0238/4119] Refactor vmdk upload code A syntax error is hit when trying to upload a flat vmdk file that is accompanied by a descriptor file. 
The code block that handles this has some unneeded characters that cause the error. Also, an else-block has been removed so that we can remove an extra indent. Change-Id: Iaf5c914e09da6831eeeec141228b39554a1e2216 Closes-bug: #1288471 --- functions | 51 +++++++++++++++++++++++++-------------------------- 1 file changed, 25 insertions(+), 26 deletions(-) diff --git a/functions b/functions index ab8319b0ce..1d30922916 100644 --- a/functions +++ b/functions @@ -163,38 +163,37 @@ function upload_image { if [[ ${IMAGE_NAME: -5} != "-flat" ]]; then warn $LINENO "Expected filename suffix: '-flat'."` `" Filename provided: ${IMAGE_NAME}" - else - descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk" - path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` - flat_path="${image_url:0:$path_len}" - descriptor_url=$flat_path$descriptor_fname - warn $LINENO "$descriptor_data_pair_msg"` - `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" - if [[ $flat_path != file* ]]; then - if [[ ! -f $FILES/$descriptor_fname || \ - "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then - wget -c $descriptor_url -O $FILES/$descriptor_fname - if [[ $? -ne 0 ]]; then - warn $LINENO "Descriptor not found $descriptor_url" - descriptor_found=false - fi - fi - descriptor_url="$FILES/$descriptor_fname" - else - descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g") - if [[ ! -f $descriptor_url || \ - "$(stat -c "%s" $descriptor_url)" == "0" ]]; then + fi + + descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk" + path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` + flat_path="${image_url:0:$path_len}" + descriptor_url=$flat_path$descriptor_fname + warn $LINENO "$descriptor_data_pair_msg"` + `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" + if [[ $flat_path != file* ]]; then + if [[ ! -f $FILES/$descriptor_fname || \ + "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then + wget -c $descriptor_url -O $FILES/$descriptor_fname + if [[ $? 
-ne 0 ]]; then warn $LINENO "Descriptor not found $descriptor_url" descriptor_found=false fi fi - if $descriptor_found; then - vmdk_adapter_type="$(head -25 $descriptor_url |"` - `" { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })" - vmdk_adapter_type="${vmdk_adapter_type#*\"}" - vmdk_adapter_type="${vmdk_adapter_type%?}" + descriptor_url="$FILES/$descriptor_fname" + else + descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g") + if [[ ! -f $descriptor_url || \ + "$(stat -c "%s" $descriptor_url)" == "0" ]]; then + warn $LINENO "Descriptor not found $descriptor_url" + descriptor_found=false fi fi + if $descriptor_found; then + vmdk_adapter_type="$(head -25 $descriptor_url | { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })" + vmdk_adapter_type="${vmdk_adapter_type#*\"}" + vmdk_adapter_type="${vmdk_adapter_type%?}" + fi vmdk_disktype="preallocated" else vmdk_disktype="preallocated" From 581f0ee48510d8eead8a95888ad9b56d89009a76 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Thu, 20 Feb 2014 16:28:15 +0100 Subject: [PATCH 0239/4119] Add a few missing package for SUSE Additionally rearranged the package list to be alphabetically sorrted Change-Id: I52cea97da60437250d0b7cf86a71e4a05d765568 --- files/rpms-suse/baremetal | 1 + files/rpms-suse/general | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) create mode 100644 files/rpms-suse/baremetal diff --git a/files/rpms-suse/baremetal b/files/rpms-suse/baremetal new file mode 100644 index 0000000000..61f73eeae3 --- /dev/null +++ b/files/rpms-suse/baremetal @@ -0,0 +1 @@ +dnsmasq diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 704947ea53..6d994eaf7a 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -1,15 +1,20 @@ +bc bridge-utils ca-certificates-mozilla curl euca2ools +findutils-locate # useful when debugging git-core iputils +libopenssl-devel # to rebuild pyOpenSSL if needed +lsof # useful when debugging +make openssh 
openssl psmisc -python-setuptools # instead of python-distribute; dist:sle11sp2 python-cmd2 # dist:opensuse-12.3 python-pylint +python-setuptools # instead of python-distribute; dist:sle11sp2 python-unittest2 screen tar @@ -17,7 +22,3 @@ tcpdump unzip vim-enhanced wget -bc - -findutils-locate # useful when debugging -lsof # useful when debugging From 4d8af4aa05a76219b634d02485ae637a404b399f Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Thu, 6 Mar 2014 15:07:53 +0400 Subject: [PATCH 0240/4119] Add n-obj to stop_nova Add missing nova-object service to nova services list Change-Id: Ib26204b69356ad030ba3d03f095993370fbb2676 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 583a5923ce..2d8715ba48 100644 --- a/lib/nova +++ b/lib/nova @@ -731,7 +731,7 @@ function stop_nova { # Kill the nova screen windows # Some services are listed here twice since more than one instance # of a service may be running in certain configs. - for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta; do + for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do screen_stop $serv done if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then From 423d7901a4cd6bc95188e023625b4e21251fad28 Mon Sep 17 00:00:00 2001 From: Nadya Privalova Date: Thu, 6 Mar 2014 15:14:59 +0400 Subject: [PATCH 0241/4119] Add an ability to configure debug-level for ceilometer Change-Id: Ibe9dd2391202a5af291d2eed1559bae60370f9a8 --- lib/ceilometer | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ceilometer b/lib/ceilometer index 04c1a34b8b..b0899e2f24 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -129,6 +129,7 @@ function configure_ceilometer { iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications' iniset $CEILOMETER_CONF DEFAULT verbose True + iniset $CEILOMETER_CONF DEFAULT debug 
"$ENABLE_DEBUG_LOG_LEVEL" # Install the policy file for the API server cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR From d44517dfcfacb5aa9e1952847a1505fd3a92580b Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Tue, 28 Jan 2014 20:29:18 +0000 Subject: [PATCH 0242/4119] Add support for configuring OVS to work with OpenDaylight This adds support for running OpenDaylight as an OpenStack Neutron plugin under devstack. This entails downloading the latest version of OpenDaylight, configuring it, and running it as a service under devstack. This code also includes pieces which configure Open vSwitch on each devstack node to point at OpenDaylight as their OpenFlow and OVSDB control interface. This is required for compute hosts, which will not be running any Neutron software on them at all. This post-devstack configuration is handled in the extras directory because of the fact there is no Neutron code running on the compute hosts themselves. Closes-bug: #1273917 Change-Id: I696e7c7fe63c835f90c56105775def305a702877 --- extras.d/80-opendaylight.sh | 67 ++++++++++++++ files/apts/opendaylight | 2 + files/rpms-suse/opendaylight | 4 + files/rpms/opendaylight | 1 + lib/opendaylight | 167 +++++++++++++++++++++++++++++++++++ 5 files changed, 241 insertions(+) create mode 100644 extras.d/80-opendaylight.sh create mode 100644 files/apts/opendaylight create mode 100644 files/rpms-suse/opendaylight create mode 100644 files/rpms/opendaylight create mode 100644 lib/opendaylight diff --git a/extras.d/80-opendaylight.sh b/extras.d/80-opendaylight.sh new file mode 100644 index 0000000000..cc5c8dec1a --- /dev/null +++ b/extras.d/80-opendaylight.sh @@ -0,0 +1,67 @@ +# opendaylight.sh - DevStack extras script + +# Need this first to get the is_***_enabled for ODL +source $TOP_DIR/lib/opendaylight + +if is_service_enabled odl-server; then + if [[ "$1" == "source" ]]; then + # no-op + : + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + install_opendaylight + 
configure_opendaylight + init_opendaylight + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + # This has to start before Neutron + start_opendaylight + elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then + # no-op + : + fi + + if [[ "$1" == "unstack" ]]; then + stop_opendaylight + cleanup_opendaylight + fi + + if [[ "$1" == "clean" ]]; then + # no-op + : + fi +fi + +if is_service_enabled odl-compute; then + if [[ "$1" == "source" ]]; then + # no-op + : + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + install_opendaylight-compute + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + create_nova_conf_neutron + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + echo_summary "Initializing OpenDaylight" + ODL_LOCAL_IP=${ODL_LOCAL_IP:-$HOST_IP} + ODL_MGR_PORT=${ODL_MGR_PORT:-6640} + read ovstbl <<< $(sudo ovs-vsctl get Open_vSwitch . _uuid) + sudo ovs-vsctl set-manager tcp:$ODL_MGR_IP:$ODL_MGR_PORT + sudo ovs-vsctl set Open_vSwitch $ovstbl other_config={"local_ip"="$ODL_LOCAL_IP"} + elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then + # no-op + : + fi + + if [[ "$1" == "unstack" ]]; then + sudo ovs-vsctl del-manager + BRIDGES=$(sudo ovs-vsctl list-br) + for bridge in $BRIDGES ; do + sudo ovs-vsctl del-controller $bridge + done + + stop_opendaylight-compute + fi + + if [[ "$1" == "clean" ]]; then + # no-op + : + fi +fi diff --git a/files/apts/opendaylight b/files/apts/opendaylight new file mode 100644 index 0000000000..ec3cc9daf8 --- /dev/null +++ b/files/apts/opendaylight @@ -0,0 +1,2 @@ +openvswitch-datapath-dkms # NOPRIME +openvswitch-switch # NOPRIME diff --git a/files/rpms-suse/opendaylight b/files/rpms-suse/opendaylight new file mode 100644 index 0000000000..d6c7146331 --- /dev/null +++ b/files/rpms-suse/opendaylight @@ -0,0 +1,4 @@ +openvswitch # NOPRIME +openvswitch-controller # NOPRIME +openvswitch-switch # NOPRIME + diff --git a/files/rpms/opendaylight b/files/rpms/opendaylight new file mode 100644 index 
0000000000..98aaaf48f7 --- /dev/null +++ b/files/rpms/opendaylight @@ -0,0 +1 @@ +openvswitch # NOPRIME diff --git a/lib/opendaylight b/lib/opendaylight new file mode 100644 index 0000000000..ca81c20e55 --- /dev/null +++ b/lib/opendaylight @@ -0,0 +1,167 @@ +# lib/opendaylight +# Functions to control the configuration and operation of the opendaylight service + +# Dependencies: +# +# - ``functions`` file +# # ``DEST`` must be defined +# # ``STACK_USER`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# - is_opendaylight_enabled +# - is_opendaylight-compute_enabled +# - install_opendaylight +# - install_opendaylight-compute +# - configure_opendaylight +# - init_opendaylight +# - start_opendaylight +# - stop_opendaylight-compute +# - stop_opendaylight +# - cleanup_opendaylight + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# For OVS_BRIDGE and PUBLIC_BRIDGE +source $TOP_DIR/lib/neutron_plugins/ovs_base + +# Defaults +# -------- + +# The IP address of ODL. Set this in local.conf. +# ODL_MGR_IP= +ODL_MGR_IP=${ODL_MGR_IP:-$SERVICE_HOST} + +# +ODL_DIR=$DEST/opendaylight + +# The OpenDaylight Package, currently using 'Hydrogen' release +ODL_PKG=${ODL_PKG:-distributions-virtualization-0.1.1-osgipackage.zip} + +# The OpenDaylight URL +ODL_URL=${ODL_URL:-https://nexus.opendaylight.org/content/repositories/opendaylight.release/org/opendaylight/integration/distributions-virtualization/0.1.1} + +# Default arguments for OpenDaylight. This is typically used to set +# Java memory options. 
+# ODL_ARGS=Xmx1024m -XX:MaxPermSize=512m +ODL_ARGS=${ODL_ARGS:-"-XX:MaxPermSize=384m"} + +# How long to pause after ODL starts to let it complete booting +ODL_BOOT_WAIT=${ODL_BOOT_WAIT:-60} + +# Set up default directories + + +# Entry Points +# ------------ + +# Test if OpenDaylight is enabled +# is_opendaylight_enabled +function is_opendaylight_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"odl-" ]] && return 0 + return 1 +} + +# cleanup_opendaylight() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_opendaylight { + : +} + +# configure_opendaylight() - Set config files, create data dirs, etc +function configure_opendaylight { + # Remove simple forwarder + rm -f $ODL_DIR/opendaylight/plugins/org.opendaylight.controller.samples.simpleforwarding* + + # Configure OpenFlow 1.3 + echo "ovsdb.of.version=1.3" >> $ODL_DIR/opendaylight/configuration/config.ini +} + +# init_opendaylight() - Initialize databases, etc. +function init_opendaylight { + # clean up from previous (possibly aborted) runs + # create required data files + : +} + +# install_opendaylight() - Collect source and prepare +function install_opendaylight { + local _pwd=$(pwd) + + if is_ubuntu; then + install_package maven openjdk-7-jre openjdk-7-jdk + else + yum_install maven java-1.7.0-openjdk + fi + + # Download OpenDaylight + mkdir -p $ODL_DIR + cd $ODL_DIR + wget -N $ODL_URL/$ODL_PKG + unzip -u $ODL_PKG +} + +# install_opendaylight-compute - Make sure OVS is install +function install_opendaylight-compute { + local kernel_version + # Install deps + # FIXME add to ``files/apts/neutron``, but don't install if not needed! 
+ if is_ubuntu; then + kernel_version=`cat /proc/version | cut -d " " -f3` + install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version + elif is_fedora; then + install_package openvswitch + # Ensure that the service is started + restart_service openvswitch + elif is_suse; then + install_package openvswitch + restart_service openvswitch-switch + restart_service openvswitch-controller + fi +} + +# start_opendaylight() - Start running processes, including screen +function start_opendaylight { + if is_ubuntu; then + JHOME=/usr/lib/jvm/java-1.7.0-openjdk-amd64 + else + JHOME=/usr/lib/jvm/java-1.7.0-openjdk + fi + + # The flags to ODL have the following meaning: + # -of13: runs ODL using OpenFlow 1.3 protocol support. + # -virt ovsdb: Runs ODL in "virtualization" mode with OVSDB support + screen_it odl-server "cd $ODL_DIR/opendaylight && JAVE_HOME=$JHOME ./run.sh $ODL_ARGS -of13 -virt ovsdb" + + # Sleep a bit to let OpenDaylight finish starting up + sleep $ODL_BOOT_WAIT +} + +# stop_opendaylight() - Stop running processes (non-screen) +function stop_opendaylight { + screen_stop odl-server +} + +# stop_opendaylight-compute() - Remove OVS bridges +function stop_opendaylight-compute { + # remove all OVS ports that look like Neutron created ports + for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do + sudo ovs-vsctl del-port ${port} + done + + # remove all OVS bridges created by Neutron + for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE}); do + sudo ovs-vsctl del-br ${bridge} + done +} + +# Restore xtrace +$XTRACE + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: From a99b869d3c14b33d0cf59877f3ae60686763f8ae Mon Sep 17 00:00:00 2001 From: Sergey Skripnick Date: Wed, 5 Mar 2014 14:47:58 +0200 Subject: [PATCH 0243/4119] Do not restart libvirt if n-cpu is disabled If this service is disable in localrc, 
libvirt does not installed at all, and should not be restarted. Change-Id: Iaf482d4a82a26546c25249b3e32c7e629d862a1b Closes: bug 1288236 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 583a5923ce..2f6d04db78 100644 --- a/lib/nova +++ b/lib/nova @@ -308,7 +308,7 @@ function configure_nova { # Rebuild the config file from scratch create_nova_conf - if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then # Configure hypervisor plugin configure_nova_hypervisor fi From b44a8ef14f4e177aef0528db2b7721030f76b290 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 6 Mar 2014 11:25:04 -0600 Subject: [PATCH 0244/4119] Fix errexit in lib/ldap clear_ldap_state() deletes an object from the DIT that doesn't exist on the first run, this is OK but fails with errexit enabled. Change-Id: I3b881eedc891caa6b2dfd5913e43f3babcfa7d47 --- lib/ldap | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ldap b/lib/ldap index 51d02519af..efe2f096d7 100644 --- a/lib/ldap +++ b/lib/ldap @@ -154,7 +154,7 @@ function stop_ldap { # clear_ldap_state() - Clear LDAP State function clear_ldap_state { - ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN" + ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN" || : } # Restore xtrace From 1eae3e155a25faa8e0bb6ddba77e580c774fd265 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 6 Mar 2014 11:49:22 -0600 Subject: [PATCH 0245/4119] Make stop_swift() more robust for Grenade stop_swift() wasn't calling screen_stop() so the pid files and screen sessions were not being cleaned up. DevStack doesn't really care but Grenade does for the 'base' copy of DevStack. This should be backported to stable/havana for this reason. 
Change-Id: Ib5afb321cef2b7ad74e69a3fd0d1dad469f78b11 --- lib/swift | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/swift b/lib/swift index 5d4d4ef506..b8bc1b66e7 100644 --- a/lib/swift +++ b/lib/swift @@ -687,6 +687,11 @@ function stop_swift { swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true fi # Dump all of the servers + # Maintain the iteration as screen_stop() has some desirable side-effects + for type in proxy object container account; do + screen_stop s-${type} + done + # Blast out any stragglers pkill -f swift- } From f5d2a5ceb4030aa0868b11ef84b5055b70693702 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 6 Mar 2014 13:45:42 -0500 Subject: [PATCH 0246/4119] test for adding crazy branches as found by dansmith's clever hack, if devstack lands a crazy branch name in stackrc, we'd break the devstack gate. While it's doubtful anyone would do this, add a basic sanity check. Change-Id: Ib3b1881ed4fd520a1828ed073a7c8353e6f0a839 --- run_tests.sh | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/run_tests.sh b/run_tests.sh index a0bfbee0c0..685b2037f0 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -27,3 +27,16 @@ fi echo "Running bash8..." ./tools/bash8.py -v $FILES + + +# Test that no one is trying to land crazy refs as branches + +echo "Ensuring we don't have crazy refs" + +REFS=`grep BRANCH stackrc | grep -v -- '-master'` +rc=$? +if [[ $rc -eq 0 ]]; then + echo "Branch defaults must be master. Found:" + echo $REFS + exit 1 +fi From 07f1d0ef3d638d2289a45a17546e976907e004ee Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Thu, 6 Mar 2014 23:23:01 +0000 Subject: [PATCH 0247/4119] Iniset keystone auth version Introduces support for suth_version config flag in lib/tempest. The variable is named TEMPEST_AUTH_VERSION, and it can be set via localrc, so that the devstack-vm-gate-wrap may control it. The aim is to setup a keystone v3 based experimental check job in tempest experimental pipeline. 
Partially implements bp multi-keystone-api-version-tests Change-Id: Ia6832d87308c6c7109e6ae0dbd8dff61134718ee --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 8455aae170..b90988d1d9 100644 --- a/lib/tempest +++ b/lib/tempest @@ -267,6 +267,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG identity admin_username $ADMIN_USERNAME iniset $TEMPEST_CONFIG identity admin_password "$password" iniset $TEMPEST_CONFIG identity admin_tenant_name $ADMIN_TENANT_NAME + iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v2} # Image # for the gate we want to be able to override this variable so we aren't From e530ba30a6965c016934819be5b1cfcaa6879b75 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 7 Mar 2014 05:58:18 -0500 Subject: [PATCH 0248/4119] make compute-admin correct we lost the admin tenant at some point in the last couple of days which disabled 500 tempest tests. Bring this back. Change-Id: I5cab2074777cab99982ae8fc4a83663e9d128284 --- lib/tempest | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index 8455aae170..b3736da963 100644 --- a/lib/tempest +++ b/lib/tempest @@ -293,9 +293,9 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method # Compute admin - iniset $TEMPEST_CONFIG "compute-admin" username $USERNAME - iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED - iniset $TEMPEST_CONFIG "compute-admin" tenant_name $TENANT_NAME + iniset $TEMPEST_CONFIG "compute-admin" username $ADMIN_USERNAME + iniset $TEMPEST_CONFIG "compute-admin" password "$password" + iniset $TEMPEST_CONFIG "compute-admin" tenant_name $ADMIN_TENANT_NAME # Network iniset $TEMPEST_CONFIG network api_version 2.0 From bb1e07859cce688e3beed2c573e9073a72f778fb Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 6 Mar 2014 09:40:27 -0800 Subject: [PATCH 0249/4119] Don't install vim or locate by default Devstack 
doesn't need vim or locate, if someone wants to use them, they can just install them afterwards. Change-Id: I00f27c20c86d89465e4aefc67ed645a309c09a03 --- files/apts/general | 2 -- files/rpms-suse/general | 2 -- tools/xen/prepare_guest.sh | 2 +- 3 files changed, 1 insertion(+), 5 deletions(-) diff --git a/files/apts/general b/files/apts/general index 32d31f0642..995c0c6f88 100644 --- a/files/apts/general +++ b/files/apts/general @@ -9,8 +9,6 @@ git lsof # useful when debugging openssh-server openssl -vim-nox -locate # useful when debugging python-virtualenv python-unittest2 iputils-ping diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 6d994eaf7a..ff27a3aac7 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -3,7 +3,6 @@ bridge-utils ca-certificates-mozilla curl euca2ools -findutils-locate # useful when debugging git-core iputils libopenssl-devel # to rebuild pyOpenSSL if needed @@ -20,5 +19,4 @@ screen tar tcpdump unzip -vim-enhanced wget diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index 440774ec5b..2b5e418a6a 100755 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -73,7 +73,7 @@ EOF # Install basics apt-get update apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool -apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo python-netaddr +apt-get install -y curl wget ssh openssh-server python-pip git sudo python-netaddr pip install xenapi # Install XenServer guest utilities From b27f16d71660f75fcd82a035cdaf2b2eddec99ce Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 28 Feb 2014 14:29:02 +1100 Subject: [PATCH 0250/4119] Detect missing packages with yum yum -y doesn't report an error when packages are missing (see [1] for upstream discussion). Thus we run the output of yum through a small awk script looking for missing packages output. 
The one change required for RHEL is that python-wsgiref is included in the distro python, so doesn't need a separate package. [1] https://bugzilla.redhat.com/show_bug.cgi?id=965567 Change-Id: I9908ff4edbf2b0d961d25837a08a34e1417bbb02 --- files/rpms/glance | 2 +- functions-common | 17 ++++++++++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/files/rpms/glance b/files/rpms/glance index 25c5d3902b..534097a92f 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -13,6 +13,6 @@ python-lxml #dist:f18,f19,f20,rhel7 python-paste-deploy #dist:f18,f19,f20,rhel7 python-routes python-sqlalchemy -python-wsgiref +python-wsgiref #dist:f18,f19,f20 pyxattr zlib-devel # testonly diff --git a/functions-common b/functions-common index 0db3ff3e7c..ed3d8832fd 100644 --- a/functions-common +++ b/functions-common @@ -938,9 +938,24 @@ function yum_install { [[ "$OFFLINE" = "True" ]] && return local sudo="sudo" [[ "$(id -u)" = "0" ]] && sudo="env" + + # The manual check for missing packages is because yum -y assumes + # missing packages are OK. See + # https://bugzilla.redhat.com/show_bug.cgi?id=965567 $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ no_proxy=$no_proxy \ - yum install -y "$@" + yum install -y "$@" 2>&1 | \ + awk ' + BEGIN { fail=0 } + /No package/ { fail=1 } + { print } + END { exit fail }' || \ + die $LINENO "Missing packages detected" + + # also ensure we catch a yum failure + if [[ ${PIPESTATUS[0]} != 0 ]]; then + die $LINENO "Yum install failure" + fi } # zypper wrapper to set arguments correctly From f19ccb63593e4c3e6c1c2a7d4f2552c30ca1ee62 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Sat, 8 Mar 2014 07:54:05 -0800 Subject: [PATCH 0251/4119] Take tempurl out of Swift pipeline additions Swift commit 165dd44 added tempurl to the sample config, so now it appears twice in the default devstack-installed configuration. 
This commit removes tempurl from $SWIFT_EXTRAS_MIDDLEWARE so that it only appears once in the generated proxy pipeline. Change-Id: I4204b2a444312ab87c17f5fb296a43818a4528a6 --- lib/swift | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index b8bc1b66e7..b65544046a 100644 --- a/lib/swift +++ b/lib/swift @@ -67,8 +67,8 @@ fi SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_DEFAULT} # Set ``SWIFT_EXTRAS_MIDDLEWARE`` to extras middlewares. -# Default is ``staticweb, tempurl, formpost`` -SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb} +# Default is ``staticweb, formpost`` +SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-formpost staticweb} # Set ``SWIFT_EXTRAS_MIDDLEWARE_LAST`` to extras middlewares that need to be at # the end of the pipeline. From 11b36c9b0a0a04ff3a53ae95c6de94fdd457f5e7 Mon Sep 17 00:00:00 2001 From: Roey Chen Date: Mon, 10 Mar 2014 11:25:50 +0200 Subject: [PATCH 0252/4119] Fixed unconditioned source phase in OpenDaylight extras Should source ``lib/opendaylight`` in ``extras.d/80-opendaylight.sh`` only when appropriate services are enabled. 
Fix for bug/1290033 Change-Id: Ifa470e1e132029f3c5bf255f27c4e96373b339a8 Signed-off-by: Roey Chen --- extras.d/80-opendaylight.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/extras.d/80-opendaylight.sh b/extras.d/80-opendaylight.sh index cc5c8dec1a..57b43288e0 100644 --- a/extras.d/80-opendaylight.sh +++ b/extras.d/80-opendaylight.sh @@ -1,7 +1,9 @@ # opendaylight.sh - DevStack extras script -# Need this first to get the is_***_enabled for ODL -source $TOP_DIR/lib/opendaylight +if is_service_enabled odl-server odl-compute; then + # Initial source + [[ "$1" == "source" ]] && source $TOP_DIR/lib/opendaylight +fi if is_service_enabled odl-server; then if [[ "$1" == "source" ]]; then From d9259ea466e54349fa87e7f76b7dfd061b19423c Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 10 Mar 2014 08:39:15 -0400 Subject: [PATCH 0253/4119] remove distros that are out of support by their upstream raring EOL was - 27 Jan 2014 f18 EOL was - 14 Jan 2014 opensuse 12.2 was - 15 Jan 2014 if their upstream isn't going to support them, we shouldn't be in devstack. this additionally leaves us in an interesting situation that there is no longer *any* opensuse version listed as supported. if the opensuse community doesn't step up here we should probably look at removing it. Change-Id: Ibb883930b430477dfd3b5126c5db04f95a50d3a7 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 148ce04e28..e76a55c534 100755 --- a/stack.sh +++ b/stack.sh @@ -142,7 +142,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (precise|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f18|f19|f20|opensuse-12.2|rhel6) ]]; then +if [[ ! 
${DISTRO} =~ (precise|saucy|trusty|7.0|wheezy|sid|testing|jessie|f19|f20|rhel6) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 353c4f1240d974e9ce93ba1f00a4bc7fe2c5856e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 10 Mar 2014 08:44:18 -0400 Subject: [PATCH 0254/4119] remove additional f18 references f18 has been EOL for 6 weeks now, time to purge it from devstack Change-Id: I5aac2c63b2f4cd8b01ae685b1acf4c188637558b --- files/rpms/cinder | 2 +- files/rpms/glance | 4 ++-- files/rpms/horizon | 4 ++-- files/rpms/keystone | 8 ++++---- files/rpms/neutron | 4 ++-- files/rpms/nova | 6 +++--- files/rpms/swift | 2 +- 7 files changed, 15 insertions(+), 15 deletions(-) diff --git a/files/rpms/cinder b/files/rpms/cinder index 199ae10b79..423d57cd98 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -4,4 +4,4 @@ qemu-img python-devel postgresql-devel iscsi-initiator-utils -python-lxml #dist:f18,f19,f20,rhel7 +python-lxml #dist:f19,f20,rhel7 diff --git a/files/rpms/glance b/files/rpms/glance index 25c5d3902b..c886ecee10 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -9,8 +9,8 @@ python-argparse python-devel python-eventlet python-greenlet -python-lxml #dist:f18,f19,f20,rhel7 -python-paste-deploy #dist:f18,f19,f20,rhel7 +python-lxml #dist:f19,f20,rhel7 +python-paste-deploy #dist:f19,f20,rhel7 python-routes python-sqlalchemy python-wsgiref diff --git a/files/rpms/horizon b/files/rpms/horizon index 59503cc9aa..2dd24e0763 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -16,8 +16,8 @@ python-kombu python-migrate python-mox python-nose -python-paste #dist:f18,f19,f20 -python-paste-deploy #dist:f18,f19,f20 +python-paste #dist:f19,f20 +python-paste-deploy #dist:f19,f20 python-routes python-sphinx python-sqlalchemy diff --git a/files/rpms/keystone b/files/rpms/keystone index 99e8524628..7182091b31 100644 --- 
a/files/rpms/keystone +++ b/files/rpms/keystone @@ -1,9 +1,9 @@ python-greenlet libxslt-devel # dist:f20 -python-lxml #dist:f18,f19,f20 -python-paste #dist:f18,f19,f20 -python-paste-deploy #dist:f18,f19,f20 -python-paste-script #dist:f18,f19,f20 +python-lxml #dist:f19,f20 +python-paste #dist:f19,f20 +python-paste-deploy #dist:f19,f20 +python-paste-script #dist:f19,f20 python-routes python-sqlalchemy python-webob diff --git a/files/rpms/neutron b/files/rpms/neutron index 42d7f68d37..06ea0ea35d 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -11,8 +11,8 @@ python-greenlet python-iso8601 python-kombu #rhel6 gets via pip -python-paste # dist:f18,f19,f20,rhel7 -python-paste-deploy # dist:f18,f19,f20,rhel7 +python-paste # dist:f19,f20,rhel7 +python-paste-deploy # dist:f19,f20,rhel7 python-qpid python-routes python-sqlalchemy diff --git a/files/rpms/nova b/files/rpms/nova index a607d925e1..45d6e0bfb1 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -28,11 +28,11 @@ python-kombu python-lockfile python-migrate python-mox -python-paramiko # dist:f18,f19,f20,rhel7 +python-paramiko # dist:f19,f20,rhel7 # ^ on RHEL6, brings in python-crypto which conflicts with version from # pip we need -python-paste # dist:f18,f19,f20,rhel7 -python-paste-deploy # dist:f18,f19,f20,rhel7 +python-paste # dist:f19,f20,rhel7 +python-paste-deploy # dist:f19,f20,rhel7 python-qpid python-routes python-sqlalchemy diff --git a/files/rpms/swift b/files/rpms/swift index 72253f7752..bf29ea29b7 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -9,7 +9,7 @@ python-eventlet python-greenlet python-netifaces python-nose -python-paste-deploy # dist:f18,f19,f20,rhel7 +python-paste-deploy # dist:f19,f20,rhel7 python-simplejson python-webob pyxattr From 13349080b11383697f7c5312c357cc6c336ff9ba Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 10 Mar 2014 11:27:23 -0400 Subject: [PATCH 0255/4119] put libvirt debug in the right place libvirt debug setting was happening in a place where we 
weren't actually resetting the daemon. Move it to into the hypervisor plugin where we do. Change-Id: Ia79b0ef50f6b8fb007a20ce5cb4e510a5e4600a5 --- lib/nova | 11 ----------- lib/nova_plugins/hypervisor-libvirt | 10 ++++++++++ 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/lib/nova b/lib/nova index 583a5923ce..f5e0d11281 100644 --- a/lib/nova +++ b/lib/nova @@ -665,17 +665,6 @@ function start_nova_compute { fi if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - # Enable client side traces for libvirt - local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" - local log_outputs="1:file:/var/log/libvirt/libvirtd.log" - # Enable server side traces for libvirtd - if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then - echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf - fi - if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then - echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf - fi - # The group **$LIBVIRT_GROUP** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'" diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index bbf65546f7..26880e5850 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -103,6 +103,16 @@ EOF fi add_user_to_group $STACK_USER $LIBVIRT_GROUP + # Enable server side traces for libvirtd + local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" + local log_outputs="1:file:/var/log/libvirt/libvirtd.log" + if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then + echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + if ! 
grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then + echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + # libvirt detects various settings on startup, as we potentially changed # the system configuration (modules, filesystems), we need to restart # libvirt to detect those changes. From 2983474e37d6c97c482e154a1f0d1f60a709915b Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 9 Mar 2014 18:36:42 +0100 Subject: [PATCH 0256/4119] Use the $SERVICE_HOST in backup_swift_url The $SERVICE_HOST is used to specify the swift proxy endpoint, the c-bak should use the same endpoint. Change-Id: Ia815f514839b0d1ec3fb9bb40992637c4f123e06 --- lib/cinder | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/cinder b/lib/cinder index dd2956a5b4..dadbe40a3b 100644 --- a/lib/cinder +++ b/lib/cinder @@ -274,6 +274,10 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT lock_path $CINDER_STATE_PATH iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL + if is_service_enabled swift; then + iniset $CINDER_CONF DEFAULT backup_swift_url "http://$SERVICE_HOST:8080/v1/AUTH_" + fi + if is_service_enabled ceilometer; then iniset $CINDER_CONF DEFAULT notification_driver "cinder.openstack.common.notifier.rpc_notifier" fi From cea32b1f86631761e170413124dbf80972234a8c Mon Sep 17 00:00:00 2001 From: Aaron Rosen Date: Tue, 4 Mar 2014 16:20:14 -0800 Subject: [PATCH 0257/4119] Configuration needed for neutron nova callback Change-Id: I07cb476f5e87e967cd6fbbfc82881e8a147453b4 --- lib/neutron | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/lib/neutron b/lib/neutron index bb591abb0b..84e827761a 100644 --- a/lib/neutron +++ b/lib/neutron @@ -110,6 +110,10 @@ Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} # nova vif driver that all plugins should use NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} 
+Q_NOTIFY_NOVA_PORT_STATUS_CHANGE=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGE:-True} +Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_CHANGE:-True} +VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} +VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} # The next two variables are configured by plugin # e.g. _configure_neutron_l3_agent or lib/neutron_plugins/* @@ -313,6 +317,9 @@ function create_nova_conf_neutron { if is_service_enabled q-meta; then iniset $NOVA_CONF DEFAULT service_neutron_metadata_proxy "True" fi + + iniset $NOVA_CONF DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" + iniset $NOVA_CONF DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" } # create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process @@ -754,6 +761,16 @@ function _configure_neutron_service { iniset $NEUTRON_CONF DEFAULT ${I/=/ } done + # Configuration for neutron notifations to nova. + iniset $NEUTRON_CONF DEFAULT notify_nova_port_status_change $Q_NOTIFY_NOVA_PORT_STATUS_CHANGE + iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES + iniset $NEUTRON_CONF DEFAULT nova_url "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2" + iniset $NEUTRON_CONF DEFAULT nova_admin_username nova $NOVA_USER + iniset $NEUTRON_CONF DEFAULT nova_admin_password $SERVICE_PASSWORD + ADMIN_TENANT_ID=$(openstack project list | awk "/ service / { print \$2 }") + iniset $NEUTRON_CONF DEFAULT nova_admin_tenant_id $ADMIN_TENANT_ID + iniset $NEUTRON_CONF DEFAULT nova_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" + # Configure plugin neutron_plugin_configure_service } From 42a59c2bfae69eca5520748d6b45803a387fdb88 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 3 Mar 2014 14:31:29 -0600 Subject: [PATCH 0258/4119] Complete moving Keystone setup out of keystone_data.sh * Move remaining role creation to create_keystone_accounts() * Move glance creation to create_glance_accounts() * 
Move nova/ec2/s3 creation to create_nova_accounts() * Move ceilometer creation to create_ceilometer_accounts() * Move tempest creation to create_tempest_accounts() * Convert moved code to use OpenStackClient for setup * files/keystone_data.sh is removed Note that the SERVICE_TENANT and ADMIN_ROLE lookups in the other service implementations are not necessary with OSC, all operations can be done using names rather than requiring IDs. Change-Id: I4283ca0036ae39fd44ed2eed834b69d78e4f8257 --- extras.d/80-tempest.sh | 2 +- files/keystone_data.sh | 146 ----------------------------------------- lib/ceilometer | 12 ++++ lib/glance | 43 ++++++++++++ lib/keystone | 19 ++++-- lib/nova | 47 ++++++++++++- lib/tempest | 24 +++++++ stack.sh | 21 ++---- 8 files changed, 146 insertions(+), 168 deletions(-) delete mode 100755 files/keystone_data.sh diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh index 0186e36aee..74f4c60d10 100644 --- a/extras.d/80-tempest.sh +++ b/extras.d/80-tempest.sh @@ -9,7 +9,7 @@ if is_service_enabled tempest; then install_tempest elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then # Tempest config must come after layer 2 services are running - : + create_tempest_accounts elif [[ "$1" == "stack" && "$2" == "extra" ]]; then echo_summary "Initializing Tempest" configure_tempest diff --git a/files/keystone_data.sh b/files/keystone_data.sh deleted file mode 100755 index fc1e8136a4..0000000000 --- a/files/keystone_data.sh +++ /dev/null @@ -1,146 +0,0 @@ -#!/bin/bash -# -# Initial data for Keystone using python-keystoneclient -# -# Tenant User Roles -# ------------------------------------------------------------------ -# service glance service -# service glance-swift ResellerAdmin -# service heat service # if enabled -# service ceilometer admin # if enabled -# Tempest Only: -# alt_demo alt_demo Member -# -# Variables set before calling this script: -# SERVICE_TOKEN - aka admin_token in keystone.conf -# SERVICE_ENDPOINT - local Keystone admin 
endpoint -# SERVICE_TENANT_NAME - name of tenant containing service accounts -# SERVICE_HOST - host used for endpoint creation -# ENABLED_SERVICES - stack.sh's list of services to start -# DEVSTACK_DIR - Top-level DevStack directory -# KEYSTONE_CATALOG_BACKEND - used to determine service catalog creation - -# Defaults -# -------- - -ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete} -SERVICE_PASSWORD=${SERVICE_PASSWORD:-$ADMIN_PASSWORD} -export SERVICE_TOKEN=$SERVICE_TOKEN -export SERVICE_ENDPOINT=$SERVICE_ENDPOINT -SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} - -# Roles -# ----- - -# The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it. -# The admin role in swift allows a user to act as an admin for their tenant, -# but ResellerAdmin is needed for a user to act as any tenant. The name of this -# role is also configurable in swift-proxy.conf -keystone role-create --name=ResellerAdmin -# Service role, so service users do not have to be admins -keystone role-create --name=service - - -# Services -# -------- - -if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then - # Nova needs ResellerAdmin role to download images when accessing - # swift through the s3 api. 
- keystone user-role-add \ - --tenant $SERVICE_TENANT_NAME \ - --user nova \ - --role ResellerAdmin -fi - -# Glance -if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then - keystone user-create \ - --name=glance \ - --pass="$SERVICE_PASSWORD" \ - --tenant $SERVICE_TENANT_NAME \ - --email=glance@example.com - keystone user-role-add \ - --tenant $SERVICE_TENANT_NAME \ - --user glance \ - --role service - # required for swift access - if [[ "$ENABLED_SERVICES" =~ "s-proxy" ]]; then - keystone user-create \ - --name=glance-swift \ - --pass="$SERVICE_PASSWORD" \ - --tenant $SERVICE_TENANT_NAME \ - --email=glance-swift@example.com - keystone user-role-add \ - --tenant $SERVICE_TENANT_NAME \ - --user glance-swift \ - --role ResellerAdmin - fi - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - keystone service-create \ - --name=glance \ - --type=image \ - --description="Glance Image Service" - keystone endpoint-create \ - --region RegionOne \ - --service glance \ - --publicurl "http://$SERVICE_HOST:9292" \ - --adminurl "http://$SERVICE_HOST:9292" \ - --internalurl "http://$SERVICE_HOST:9292" - fi -fi - -# Ceilometer -if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then - # Ceilometer needs ResellerAdmin role to access swift account stats. 
- keystone user-role-add --tenant $SERVICE_TENANT_NAME \ - --user ceilometer \ - --role ResellerAdmin -fi - -# EC2 -if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - keystone service-create \ - --name=ec2 \ - --type=ec2 \ - --description="EC2 Compatibility Layer" - keystone endpoint-create \ - --region RegionOne \ - --service ec2 \ - --publicurl "http://$SERVICE_HOST:8773/services/Cloud" \ - --adminurl "http://$SERVICE_HOST:8773/services/Admin" \ - --internalurl "http://$SERVICE_HOST:8773/services/Cloud" - fi -fi - -# S3 -if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift3" ]]; then - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - keystone service-create \ - --name=s3 \ - --type=s3 \ - --description="S3" - keystone endpoint-create \ - --region RegionOne \ - --service s3 \ - --publicurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ - --adminurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ - --internalurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" - fi -fi - -if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then - # Tempest has some tests that validate various authorization checks - # between two regular users in separate tenants - keystone tenant-create \ - --name=alt_demo - keystone user-create \ - --name=alt_demo \ - --pass="$ADMIN_PASSWORD" \ - --email=alt_demo@example.com - keystone user-role-add \ - --tenant alt_demo \ - --user alt_demo \ - --role Member -fi diff --git a/lib/ceilometer b/lib/ceilometer index 04c1a34b8b..b8305b1e9e 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -69,6 +69,11 @@ function is_ceilometer_enabled { # create_ceilometer_accounts() - Set up common required ceilometer accounts +# Project User Roles +# ------------------------------------------------------------------ +# SERVICE_TENANT_NAME ceilometer admin +# SERVICE_TENANT_NAME ceilometer ResellerAdmin (if Swift is enabled) + create_ceilometer_accounts() { SERVICE_TENANT=$(openstack project list | awk "/ 
$SERVICE_TENANT_NAME / { print \$2 }") @@ -99,6 +104,13 @@ create_ceilometer_accounts() { --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" fi + if is_service_enabled swift; then + # Ceilometer needs ResellerAdmin role to access swift account stats. + openstack role add \ + --project $SERVICE_TENANT_NAME \ + --user ceilometer \ + ResellerAdmin + fi fi } diff --git a/lib/glance b/lib/glance index 8a4c21b3f2..51e4399388 100644 --- a/lib/glance +++ b/lib/glance @@ -159,6 +159,49 @@ function configure_glance { cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON } +# create_glance_accounts() - Set up common required glance accounts + +# Project User Roles +# ------------------------------------------------------------------ +# SERVICE_TENANT_NAME glance service +# SERVICE_TENANT_NAME glance-swift ResellerAdmin (if Swift is enabled) + +function create_glance_accounts { + if is_service_enabled g-api; then + openstack user create \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT_NAME \ + glance + openstack role add \ + --project $SERVICE_TENANT_NAME \ + --user glance \ + service + # required for swift access + if is_service_enabled s-proxy; then + openstack user create \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT_NAME \ + glance-swift + openstack role add \ + --project $SERVICE_TENANT_NAME \ + --user glance-swift \ + ResellerAdmin + fi + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + openstack service create \ + --type image \ + --description "Glance Image Service" \ + glance + openstack endpoint create \ + --region RegionOne \ + --publicurl "http://$GLANCE_HOSTPORT" \ + --adminurl "http://$GLANCE_HOSTPORT" \ + --internalurl "http://$GLANCE_HOSTPORT" \ + glance + fi + fi +} + # create_glance_cache_dir() - Part of the init_glance() process function create_glance_cache_dir { # Create 
cache dir diff --git a/lib/keystone b/lib/keystone index c6856c95c3..b31cc57a56 100644 --- a/lib/keystone +++ b/lib/keystone @@ -266,9 +266,11 @@ function configure_keystone { # Tenant User Roles # ------------------------------------------------------------------ +# admin admin admin # service -- -- +# -- -- service +# -- -- ResellerAdmin # -- -- Member -# admin admin admin # demo admin admin # demo demo Member, anotherrole # invisible_to_admin demo Member @@ -294,10 +296,17 @@ function create_keystone_accounts { --project $ADMIN_TENANT \ --user $ADMIN_USER - # service - SERVICE_TENANT=$(openstack project create \ - $SERVICE_TENANT_NAME \ - | grep " id " | get_field 2) + # Create service project/role + openstack project create $SERVICE_TENANT_NAME + + # Service role, so service users do not have to be admins + openstack role create service + + # The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it. + # The admin role in swift allows a user to act as an admin for their tenant, + # but ResellerAdmin is needed for a user to act as any tenant. 
The name of this + # role is also configurable in swift-proxy.conf + openstack role create ResellerAdmin # The Member role is used by Horizon and Swift so we need to keep it: MEMBER_ROLE=$(openstack role create \ diff --git a/lib/nova b/lib/nova index 583a5923ce..a7c44211ca 100644 --- a/lib/nova +++ b/lib/nova @@ -316,9 +316,10 @@ function configure_nova { # create_nova_accounts() - Set up common required nova accounts -# Tenant User Roles +# Project User Roles # ------------------------------------------------------------------ -# service nova admin, [ResellerAdmin (swift only)] +# SERVICE_TENANT_NAME nova admin +# SERVICE_TENANT_NAME nova ResellerAdmin (if Swift is enabled) # Migrated from keystone_data.sh create_nova_accounts() { @@ -363,6 +364,48 @@ create_nova_accounts() { --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" fi fi + + if is_service_enabled n-api; then + # Swift + if is_service_enabled swift; then + # Nova needs ResellerAdmin role to download images when accessing + # swift through the s3 api. 
+ openstack role add \ + --project $SERVICE_TENANT_NAME \ + --user nova \ + ResellerAdmin + fi + + # EC2 + if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then + openstack service create \ + --type ec2 \ + --description "EC2 Compatibility Layer" \ + ec2 + openstack endpoint create \ + --region RegionOne \ + --publicurl "http://$SERVICE_HOST:8773/services/Cloud" \ + --adminurl "http://$SERVICE_HOST:8773/services/Admin" \ + --internalurl "http://$SERVICE_HOST:8773/services/Cloud" \ + ec2 + fi + fi + + # S3 + if is_service_enabled n-obj swift3; then + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + openstack service create \ + --type s3 \ + --description "S3" \ + s3 + openstack endpoint create \ + --region RegionOne \ + --publicurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ + --adminurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ + --internalurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ + s3 + fi + fi } # create_nova_conf() - Create a new nova.conf file diff --git a/lib/tempest b/lib/tempest index 16f8744d85..897efa8a8f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -358,6 +358,30 @@ function configure_tempest { $errexit } +# create_tempest_accounts() - Set up common required tempest accounts + +# Project User Roles +# ------------------------------------------------------------------ +# alt_demo alt_demo Member + +# Migrated from keystone_data.sh +function create_tempest_accounts { + if is_service_enabled tempest; then + # Tempest has some tests that validate various authorization checks + # between two regular users in separate tenants + openstack project create \ + alt_demo + openstack user create \ + --project alt_demo \ + --password "$ADMIN_PASSWORD" \ + alt_demo + openstack role add \ + --project alt_demo \ + --user alt_demo \ + Member + fi +} + # install_tempest() - Collect source and prepare function install_tempest { git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH diff --git a/stack.sh b/stack.sh index c990a1c6ca..f8973ee98f 100755 --- a/stack.sh +++ 
b/stack.sh @@ -907,14 +907,13 @@ if is_service_enabled key; then SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0 fi - # Do the keystone-specific bits from keystone_data.sh - export OS_SERVICE_TOKEN=$SERVICE_TOKEN - export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT - # Add temporarily to make openstackclient work + # Setup OpenStackclient token-flow auth export OS_TOKEN=$SERVICE_TOKEN export OS_URL=$SERVICE_ENDPOINT + create_keystone_accounts create_nova_accounts + create_glance_accounts create_cinder_accounts create_neutron_accounts @@ -922,7 +921,7 @@ if is_service_enabled key; then create_ceilometer_accounts fi - if is_service_enabled swift || is_service_enabled s-proxy; then + if is_service_enabled swift; then create_swift_accounts fi @@ -930,20 +929,14 @@ if is_service_enabled key; then create_heat_accounts fi - # ``keystone_data.sh`` creates services, admin and demo users, and roles. - ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ - SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ - S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \ - DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \ - bash -x $FILES/keystone_data.sh - - # Set up auth creds now that keystone is bootstrapped + # Begone token-flow auth unset OS_TOKEN OS_URL + + # Set up password-flow auth creds now that keystone is bootstrapped export OS_AUTH_URL=$SERVICE_ENDPOINT export OS_TENANT_NAME=admin export OS_USERNAME=admin export OS_PASSWORD=$ADMIN_PASSWORD - unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT fi From 936284b02ab6365bb0bcde49b617a57a902d491c Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 11 Mar 2014 09:35:55 +1100 Subject: [PATCH 0259/4119] Make mongo install for ceilometer NOPRIME mongodb packages are missing on some platforms, so we switch to a manual install. 
Also gate the mongo call in cleanup Change-Id: I1755e461c66be30da3db2a0994f908503c4c38ea --- files/apts/ceilometer-collector | 4 ++-- files/rpms/ceilometer-collector | 4 ++-- lib/ceilometer | 21 ++++++++++++++++++--- 3 files changed, 22 insertions(+), 7 deletions(-) diff --git a/files/apts/ceilometer-collector b/files/apts/ceilometer-collector index 71007ba4c5..f1b692ac71 100644 --- a/files/apts/ceilometer-collector +++ b/files/apts/ceilometer-collector @@ -1,5 +1,5 @@ -python-pymongo -mongodb-server +python-pymongo #NOPRIME +mongodb-server #NOPRIME libnspr4-dev pkg-config libxml2-dev diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector index c91bac36a2..9cf580d22d 100644 --- a/files/rpms/ceilometer-collector +++ b/files/rpms/ceilometer-collector @@ -1,4 +1,4 @@ selinux-policy-targeted -mongodb-server -pymongo +mongodb-server #NOPRIME +pymongo # NOPRIME mongodb # NOPRIME diff --git a/lib/ceilometer b/lib/ceilometer index b0899e2f24..6aaddcefad 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -106,7 +106,9 @@ create_ceilometer_accounts() { # cleanup_ceilometer() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_ceilometer { - mongo ceilometer --eval "db.dropDatabase();" + if [ "$CEILOMETER_BACKEND" != 'mysql' ] && [ "$CEILOMETER_BACKEND" != 'postgresql' ] ; then + mongo ceilometer --eval "db.dropDatabase();" + fi } # configure_ceilometerclient() - Set config files, create data dirs, etc @@ -164,14 +166,27 @@ function configure_ceilometer { } function configure_mongodb { + # server package is the same on all + local packages=mongodb-server + + if is_fedora; then + # mongodb client + python bindings + packages="${packages} mongodb pymongo" + else + packages="${packages} python-pymongo" + fi + + install_package ${packages} + if is_fedora; then - # install mongodb client - install_package mongodb # ensure smallfiles selected to minimize freespace requirements sudo 
sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod restart_service mongod fi + + # give mongodb time to start-up + sleep 5 } # init_ceilometer() - Initialize etc. From ccb3d10e04f7be773daf1bddd0bc2bff024ce6f4 Mon Sep 17 00:00:00 2001 From: Newell Jensen Date: Mon, 10 Mar 2014 14:28:52 -0700 Subject: [PATCH 0260/4119] Makes error message easier to understand. If the host ip address is indeterminate while executing stack.sh, an error message is displayed. This error message could be a source of confusion since it references localrc, which is deprecated. This patch makes the error message clearer and easier to understand. It does this by taking out the reference to localrc. It also points the user towards local.conf where there are suggestions on how to set HOST_IP. Change-Id: I41f14a2de85449d2a08ab7eb2849844a1087b147 Closes-Bug: #1290556 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 148ce04e28..817da26a8e 100755 --- a/stack.sh +++ b/stack.sh @@ -289,7 +289,7 @@ FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} HOST_IP=$(get_default_host_ip $FIXED_RANGE $FLOATING_RANGE "$HOST_IP_IFACE" "$HOST_IP") if [ "$HOST_IP" == "" ]; then - die $LINENO "Could not determine host ip address. Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted" + die $LINENO "Could not determine host ip address. See local.conf for suggestions on setting HOST_IP." fi # Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints. From c20bab89c47e02d88fb314d4d0a8dbfc73fca20e Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Tue, 11 Mar 2014 11:38:24 +0100 Subject: [PATCH 0261/4119] Use the python-pyOpenSSL package on openSUSE Recent pyOpenSSL releases when installed from pip depend on cryptography>=0.2.1, which itself depends on cffi>=0.8. That is conflicting with the python-cffi (0.7.2) package on openSUSE-13.1 which is required by the installed python-xattr.
Change-Id: I721ce5288d150a3b01fb2558f7ca86028d734138 --- files/rpms-suse/glance | 1 + 1 file changed, 1 insertion(+) diff --git a/files/rpms-suse/glance b/files/rpms-suse/glance index dd68ac08c8..d9844e9bb4 100644 --- a/files/rpms-suse/glance +++ b/files/rpms-suse/glance @@ -8,5 +8,6 @@ python-devel python-eventlet python-greenlet python-iso8601 +python-pyOpenSSL python-wsgiref python-xattr From 3b1f2e4e885559957a939f8a260b4cff9938bc80 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Mon, 24 Feb 2014 20:30:07 +0900 Subject: [PATCH 0262/4119] Fix inverted conditionals in setup_develop This fixes regressions introduced by: Change-Id: Ic97e68348f46245b271567893b447fcedbd7bd6e ("Handle non-zero exit code from git diff") Change-Id: I053a292c287f3035eef37db2264eda06a170f9bc Closes-Bug: 1287513 --- functions-common | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index ed3d8832fd..90cd3dfa72 100644 --- a/functions-common +++ b/functions-common @@ -1248,7 +1248,7 @@ function setup_develop { # ``errexit`` requires us to trap the exit code when the repo is changed local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed") - if [[ $update_requirements = "changed" ]]; then + if [[ $update_requirements != "changed" ]]; then (cd $REQUIREMENTS_DIR; \ $SUDO_CMD python update.py $project_dir) fi @@ -1264,7 +1264,7 @@ function setup_develop { # a variable that tells us whether or not we should UNDO the requirements # changes (this will be set to False in the OpenStack ci gate) if [ $UNDO_REQUIREMENTS = "True" ]; then - if [[ $update_requirements = "changed" ]]; then + if [[ $update_requirements != "changed" ]]; then (cd $project_dir && git reset --hard) fi fi From dd304603e011160f7f796ec4af7dcaf50008372c Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 11 Mar 2014 16:38:57 -0400 Subject: [PATCH 0263/4119] put libvirt debug behind a flag only turn on the libvirt debugging if we 
really need it, which we could control in the gate via devstack-gate. Change-Id: I5e6d41d5333357608ab6a614610c060400f70a10 --- lib/nova_plugins/hypervisor-libvirt | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 26880e5850..5a51f33808 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -25,6 +25,8 @@ set +o xtrace # File injection is disabled by default in Nova. This will turn it back on. ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False} +# if we should turn on massive libvirt debugging +DEBUG_LIBVIRT=$(trueorfalse False $DEBUG_LIBVIRT) # Entry Points @@ -104,13 +106,15 @@ EOF add_user_to_group $STACK_USER $LIBVIRT_GROUP # Enable server side traces for libvirtd - local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" - local log_outputs="1:file:/var/log/libvirt/libvirtd.log" - if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then - echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf - fi - if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then - echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf + if [[ "$DEBUG_LIBVIRT" = "True" ]] ; then + local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" + local log_outputs="1:file:/var/log/libvirt/libvirtd.log" + if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then + echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + if ! 
grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then + echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi fi # libvirt detects various settings on startup, as we potentially changed From d78c4057d4ae53a994eefb4d4b0ee01a9365e5d5 Mon Sep 17 00:00:00 2001 From: Hemanth Ravi Date: Sun, 26 Jan 2014 17:30:11 -0800 Subject: [PATCH 0264/4119] Install script for One Convergence Neutron plugin. Change-Id: I1dcc625a7c986e7533820b01af9eee5b8addcffe Implements: install for blueprint oc-nvsd-neutron-plugin --- lib/neutron_plugins/oneconvergence | 76 ++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 lib/neutron_plugins/oneconvergence diff --git a/lib/neutron_plugins/oneconvergence b/lib/neutron_plugins/oneconvergence new file mode 100644 index 0000000000..0aebff629c --- /dev/null +++ b/lib/neutron_plugins/oneconvergence @@ -0,0 +1,76 @@ +# Neutron One Convergence plugin +# --------------------------- +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/neutron_plugins/ovs_base + +Q_L3_ENABLED=true +Q_L3_ROUTER_PER_TENANT=true +Q_USE_NAMESPACE=true + +function neutron_plugin_install_agent_packages { + _neutron_ovs_base_install_agent_packages +} +# Configure common parameters +function neutron_plugin_configure_common { + + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/oneconvergence + Q_PLUGIN_CONF_FILENAME=nvsdplugin.ini + Q_PLUGIN_CLASS="neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2" + Q_DB_NAME='oc_nvsd_neutron' +} + +# Configure plugin specific information +function neutron_plugin_configure_service { + iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_ip $NVSD_IP + iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_port $NVSD_PORT + iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_user $NVSD_USER + iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_passwd $NVSD_PASSWD +} + +function neutron_plugin_configure_debug_command { + _neutron_ovs_base_configure_debug_command +} + +function 
neutron_plugin_setup_interface_driver { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver +} + +function has_neutron_plugin_security_group { + # 1 means False here + return 0 +} + +function setup_integration_bridge { + _neutron_ovs_base_setup_bridge $OVS_BRIDGE +} + +function neutron_plugin_configure_dhcp_agent { + setup_integration_bridge + iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport +} + +function neutron_plugin_configure_l3_agent { + _neutron_ovs_base_configure_l3_agent + iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport +} + +function neutron_plugin_configure_plugin_agent { + + AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nvsd-agent" + + _neutron_ovs_base_configure_firewall_driver +} + +function neutron_plugin_create_nova_conf { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + if ( is_service_enabled n-cpu && ! ( is_service_enabled q-dhcp )) ; then + setup_integration_bridge + fi +} + +# Restore xtrace +$MY_XTRACE From 7d4c7e09b4882077471c3b2cb097c237c2016f96 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 12 Mar 2014 08:05:08 -0400 Subject: [PATCH 0265/4119] remove docker from devstack with I1c9bea2fdeebc4199c4f7d8fca4580a6fb7fed5b nova removed docker from it's driver tree. We shouldn't have driver support inside of devstack that's not part of upstream projects (this has been a line we've been pretty clear on with Neutron drivers in the past). Remove docker driver accordingly. 
Change-Id: Ib91d415ea1616d99a5c5e7bc3b9015392fda5847 --- README.md | 6 +- exercises/boot_from_volume.sh | 3 - exercises/euca.sh | 3 - exercises/floating_ips.sh | 3 - exercises/sec_groups.sh | 3 - exercises/volumes.sh | 3 - lib/nova_plugins/hypervisor-docker | 132 ----------------------------- stackrc | 3 - tools/docker/README.md | 13 --- tools/docker/install_docker.sh | 68 --------------- 10 files changed, 1 insertion(+), 236 deletions(-) delete mode 100644 lib/nova_plugins/hypervisor-docker delete mode 100644 tools/docker/README.md delete mode 100755 tools/docker/install_docker.sh diff --git a/README.md b/README.md index 9914b1ed69..a0f5b2689d 100644 --- a/README.md +++ b/README.md @@ -73,7 +73,7 @@ does not run if started as root. This is a recent change (Oct 2013) from the previous behaviour of automatically creating a ``stack`` user. Automatically creating user accounts is not the right response to running as root, so -that bit is now an explicit step using ``tools/create-stack-user.sh``. +that bit is now an explicit step using ``tools/create-stack-user.sh``. Run that (as root!) or just check it out to see what DevStack's expectations are for the account it runs under. Many people simply use their usual login (the default 'ubuntu' login on a UEC image @@ -253,10 +253,6 @@ If tempest has been successfully configured, a basic set of smoke tests can be r If you would like to use Xenserver as the hypervisor, please refer to the instructions in `./tools/xen/README.md`. -# DevStack on Docker - -If you would like to use Docker as the hypervisor, please refer to the instructions in `./tools/docker/README.md`. 
- # Additional Projects DevStack has a hook mechanism to call out to a dispatch script at specific diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index f679669eea..dff8e7a632 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -44,9 +44,6 @@ source $TOP_DIR/exerciserc # the exercise is skipped is_service_enabled cinder || exit 55 -# Also skip if the hypervisor is Docker -[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 - # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/exercises/euca.sh b/exercises/euca.sh index ad852a4f79..3768b56d4e 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -40,9 +40,6 @@ source $TOP_DIR/exerciserc # the exercise is skipped is_service_enabled n-api || exit 55 -# Skip if the hypervisor is Docker -[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 - # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 8b7b96197e..1416d4dc6a 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -40,9 +40,6 @@ source $TOP_DIR/exerciserc # the exercise is skipped is_service_enabled n-api || exit 55 -# Skip if the hypervisor is Docker -[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 - # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh index d71a1e0755..5f8b0a4d5d 100755 --- a/exercises/sec_groups.sh +++ b/exercises/sec_groups.sh @@ -37,9 +37,6 @@ source $TOP_DIR/exerciserc # the exercise is skipped is_service_enabled n-api || exit 55 -# Skip if the hypervisor is Docker -[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 - # Testing Security Groups # ======================= diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 83d25c779c..0d556df9e7 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -41,9 +41,6 @@ source $TOP_DIR/exerciserc 
# exercise is skipped. is_service_enabled cinder || exit 55 -# Also skip if the hypervisor is Docker -[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 - # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker deleted file mode 100644 index fd3c4fefc8..0000000000 --- a/lib/nova_plugins/hypervisor-docker +++ /dev/null @@ -1,132 +0,0 @@ -# lib/nova_plugins/docker -# Configure the Docker hypervisor - -# Enable with: -# -# VIRT_DRIVER=docker - -# Dependencies: -# -# - ``functions`` file -# - ``nova`` and ``glance`` configurations - -# install_nova_hypervisor - install any external requirements -# configure_nova_hypervisor - make configuration changes, including those to other services -# start_nova_hypervisor - start any external services -# stop_nova_hypervisor - stop any external services -# cleanup_nova_hypervisor - remove transient data and cache - -# Save trace setting -MY_XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default directories -DOCKER_DIR=$DEST/docker - -DOCKER_UNIX_SOCKET=/var/run/docker.sock -DOCKER_PID_FILE=/var/run/docker.pid -DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042} - -DOCKER_IMAGE=${DOCKER_IMAGE:-cirros:latest} -DOCKER_IMAGE_NAME=$DEFAULT_IMAGE_NAME -DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-registry:latest} -DOCKER_REGISTRY_IMAGE_NAME=registry -DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME} - -DOCKER_APT_REPO=${DOCKER_APT_REPO:-https://get.docker.io/ubuntu} - - -# Entry Points -# ------------ - -# clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor { - stop_service docker - - # Clean out work area - sudo rm -rf /var/lib/docker -} - -# configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor { - iniset $NOVA_CONF DEFAULT compute_driver docker.DockerDriver - iniset 
$GLANCE_API_CONF DEFAULT container_formats ami,ari,aki,bare,ovf,docker -} - -# is_docker_running - Return 0 (true) if Docker is running, otherwise 1 -function is_docker_running { - local docker_pid - if [ -f "$DOCKER_PID_FILE" ]; then - docker_pid=$(cat "$DOCKER_PID_FILE") - fi - if [[ -z "$docker_pid" ]] || ! ps -p "$docker_pid" | grep [d]ocker; then - return 1 - fi - return 0 -} - -# install_nova_hypervisor() - Install external components -function install_nova_hypervisor { - # So far this is Ubuntu only - if ! is_ubuntu; then - die $LINENO "Docker is only supported on Ubuntu at this time" - fi - - # Make sure Docker is installed - if ! is_package_installed lxc-docker; then - die $LINENO "Docker is not installed. Please run tools/docker/install_docker.sh" - fi - - if ! (is_docker_running); then - die $LINENO "Docker not running" - fi -} - -# start_nova_hypervisor - Start any required external services -function start_nova_hypervisor { - if ! (is_docker_running); then - die $LINENO "Docker not running" - fi - - # Start the Docker registry container - docker run -d -p ${DOCKER_REGISTRY_PORT}:5000 \ - -e SETTINGS_FLAVOR=openstack -e OS_USERNAME=${OS_USERNAME} \ - -e OS_PASSWORD=${OS_PASSWORD} -e OS_TENANT_NAME=${OS_TENANT_NAME} \ - -e OS_GLANCE_URL="${SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" \ - -e OS_AUTH_URL=${OS_AUTH_URL} \ - $DOCKER_REGISTRY_IMAGE_NAME ./docker-registry/run.sh - - echo "Waiting for docker registry to start..." - DOCKER_REGISTRY=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT} - if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl -s $DOCKER_REGISTRY; do sleep 1; done"; then - die $LINENO "docker-registry did not start" - fi - - # Tag image if not already tagged - if ! docker images | grep $DOCKER_REPOSITORY_NAME; then - docker tag $DOCKER_IMAGE_NAME $DOCKER_REPOSITORY_NAME - fi - - # Make sure we copied the image in Glance - if ! 
(glance image-show "$DOCKER_IMAGE"); then - docker push $DOCKER_REPOSITORY_NAME - fi -} - -# stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor { - # Stop the docker registry container - docker kill $(docker ps | grep docker-registry | cut -d' ' -f1) -} - - -# Restore xtrace -$MY_XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/stackrc b/stackrc index 6bb6f37195..756ec275dc 100644 --- a/stackrc +++ b/stackrc @@ -320,9 +320,6 @@ case "$VIRT_DRIVER" in openvz) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64} IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz"};; - docker) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros} - IMAGE_URLS=${IMAGE_URLS:-};; libvirt) case "$LIBVIRT_TYPE" in lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc diff --git a/tools/docker/README.md b/tools/docker/README.md deleted file mode 100644 index 976111f750..0000000000 --- a/tools/docker/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# DevStack on Docker - -Using Docker as Nova's hypervisor requries two steps: - -* Configure DevStack by adding the following to `localrc`:: - - VIRT_DRIVER=docker - -* Download and install the Docker service and images:: - - tools/docker/install_docker.sh - -After this, `stack.sh` should run as normal. 
diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh deleted file mode 100755 index 27c8c8210b..0000000000 --- a/tools/docker/install_docker.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env bash - -# **install_docker.sh** - Do the initial Docker installation and configuration - -# install_docker.sh -# -# Install docker package and images -# * downloads a base busybox image and a glance registry image if necessary -# * install the images in Docker's image cache - - -# Keep track of the current directory -SCRIPT_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $SCRIPT_DIR/../..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Load local configuration -source $TOP_DIR/stackrc - -FILES=$TOP_DIR/files - -# Get our defaults -source $TOP_DIR/lib/nova_plugins/hypervisor-docker - -SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} - - -# Install Docker Service -# ====================== - -if is_fedora; then - install_package docker-io socat -else - # Stop the auto-repo updates and do it when required here - NO_UPDATE_REPOS=True - - # Set up home repo - curl https://get.docker.io/gpg | sudo apt-key add - - install_package python-software-properties && \ - sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list" - apt_get update - install_package --force-yes lxc-docker socat -fi - -# Start the daemon - restart just in case the package ever auto-starts... -restart_service docker - -echo "Waiting for docker daemon to start..." -DOCKER_GROUP=$(groups | cut -d' ' -f1) -CONFIGURE_CMD="while ! /bin/echo -e 'GET /v1.3/version HTTP/1.0\n\n' | socat - unix-connect:$DOCKER_UNIX_SOCKET 2>/dev/null | grep -q '200 OK'; do - # Set the right group on docker unix socket before retrying - sudo chgrp $DOCKER_GROUP $DOCKER_UNIX_SOCKET - sudo chmod g+rw $DOCKER_UNIX_SOCKET - sleep 1 -done" -if ! 
timeout $SERVICE_TIMEOUT sh -c "$CONFIGURE_CMD"; then - die $LINENO "docker did not start" -fi - -# Get guest container image -docker pull $DOCKER_IMAGE -docker tag $DOCKER_IMAGE $DOCKER_IMAGE_NAME - -# Get docker-registry image -docker pull $DOCKER_REGISTRY_IMAGE -docker tag $DOCKER_REGISTRY_IMAGE $DOCKER_REGISTRY_IMAGE_NAME From 1749106c3abb17ee7cf30eb69bc9b744f3fc5a95 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Wed, 12 Mar 2014 14:38:25 +0100 Subject: [PATCH 0266/4119] Remove unused package dependencies * /sbin/vconfig command is not used by either nova or neutron. * Now the AMQP carrot is not used, not even optionally by the oslo.messaging. * python-gfalgs just referenced as a similar configuration style, by neutron. Change-Id: Idde5446e47e7da1dd204ea518ab816e2cce77c7d --- files/apts/nova | 2 -- files/rpms-suse/nova | 2 -- files/rpms/neutron | 1 - files/rpms/nova | 3 --- 4 files changed, 8 deletions(-) diff --git a/files/apts/nova b/files/apts/nova index ae925c3293..dfb25c7f37 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -25,7 +25,6 @@ socat # used by ajaxterm python-mox python-paste python-migrate -python-gflags python-greenlet python-libvirt # NOPRIME python-libxml2 @@ -34,7 +33,6 @@ python-numpy # used by websockify for spice console python-pastedeploy python-eventlet python-cheetah -python-carrot python-tempita python-sqlalchemy python-suds diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova index ee4917d702..c3c878fb4a 100644 --- a/files/rpms-suse/nova +++ b/files/rpms-suse/nova @@ -24,7 +24,6 @@ python-Routes python-SQLAlchemy python-Tempita python-boto -python-carrot python-cheetah python-eventlet python-feedparser @@ -37,7 +36,6 @@ python-mox python-mysql python-numpy # needed by websockify for spice console python-paramiko -python-python-gflags python-sqlalchemy-migrate python-suds python-xattr # needed for glance which is needed for nova --- this shouldn't be here diff --git a/files/rpms/neutron b/files/rpms/neutron index 
42d7f68d37..e5c901be37 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -21,4 +21,3 @@ rabbitmq-server # NOPRIME qpid-cpp-server # NOPRIME sqlite sudo -vconfig diff --git a/files/rpms/nova b/files/rpms/nova index a607d925e1..61b0e9a0d1 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -17,11 +17,9 @@ mysql-server # NOPRIME parted polkit python-boto -python-carrot python-cheetah python-eventlet python-feedparser -python-gflags python-greenlet python-iso8601 python-kombu @@ -42,4 +40,3 @@ rabbitmq-server # NOPRIME qpid-cpp-server # NOPRIME sqlite sudo -vconfig From 64bd01652e6fd7c593498b1fd2bf50bfdf64ce40 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 12 Mar 2014 13:04:22 -0400 Subject: [PATCH 0267/4119] make git_clone safer the ensures that if the function returns early, we return to a sane directory, and not hang out somewhere that a future git call might modify a directory in a weird way. This is especially important in the case of stable branches where were are hopping between stable for servers and master for clients. 
Change-Id: Ib8ebbc23b1813bc1bfb31d0a079f1b882135bd39 --- functions-common | 3 +++ 1 file changed, 3 insertions(+) diff --git a/functions-common b/functions-common index 90cd3dfa72..c6fd5c7163 100644 --- a/functions-common +++ b/functions-common @@ -517,12 +517,14 @@ function git_clone { GIT_DEST=$2 GIT_REF=$3 RECLONE=$(trueorfalse False $RECLONE) + local orig_dir=`pwd` if [[ "$OFFLINE" = "True" ]]; then echo "Running in offline mode, clones already exist" # print out the results so we know what change was used in the logs cd $GIT_DEST git show --oneline | head -1 + cd $orig_dir return fi @@ -572,6 +574,7 @@ function git_clone { # print out the results so we know what change was used in the logs cd $GIT_DEST git show --oneline | head -1 + cd $orig_dir } # git can sometimes get itself infinitely stuck with transient network From 767b5a45b7c6a91a449e0cb41baf16221a7de5e1 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 12 Mar 2014 10:33:15 -0700 Subject: [PATCH 0268/4119] Split up stop_nova to match start_nova Split stop_nova into: stop_nova_compute and stop_nova_rest. This is needed to support the partial-ncpu grenade test where we want to stop everything but nova_compute. Change-Id: I6a21821277e56897d705ca5746806e2211632d12 --- lib/nova | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/lib/nova b/lib/nova index 55103e8dcc..15f56d336b 100644 --- a/lib/nova +++ b/lib/nova @@ -715,17 +715,25 @@ function start_nova { start_nova_rest } -# stop_nova() - Stop running processes (non-screen) -function stop_nova { +function stop_nova_compute { + if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + stop_nova_hypervisor + fi +} + +function stop_nova_rest { # Kill the nova screen windows # Some services are listed here twice since more than one instance # of a service may be running in certain configs. 
for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do screen_stop $serv done - if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then - stop_nova_hypervisor - fi +} + +# stop_nova() - Stop running processes (non-screen) +function stop_nova { + stop_nova_rest + stop_nova_compute } From 9c6d2840fdb67eb7af34be241bdb2fbebaf67c87 Mon Sep 17 00:00:00 2001 From: Sreeram Yerrapragada Date: Mon, 10 Mar 2014 14:12:58 -0700 Subject: [PATCH 0269/4119] fix failing wget statements under -o errexit in vmdk upload routine Fix the case when uploaded image has no descriptor. Refactored the code a bit Tested: 1. monithic Sparse 2. monolithic flat 2.1 flat file name mentioned in descriptor file 2.1 flat file name not mentioned in descriptor file 3. descriptor header not found in the file 3.1 image file name is *-flat, download descriptor 3.2 image file name does not end with *-flat 4. file name contains all image properties Change-Id: I0df9be5c2a1b9ed53cdb22d5cd40b94e56c48f37 Closes-bug: #1289664 --- functions | 63 ++++++++++++++++++++----------------------------------- 1 file changed, 23 insertions(+), 40 deletions(-) diff --git a/functions b/functions index 1d30922916..e0d2b01d0c 100644 --- a/functions +++ b/functions @@ -122,7 +122,7 @@ function upload_image { flat_fname="$(head -25 $IMAGE | { grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE || true; })" flat_fname="${flat_fname#*\"}" flat_fname="${flat_fname%?}" - if [[ -z "$flat_name" ]]; then + if [[ -z "$flat_fname" ]]; then flat_fname="$IMAGE_NAME-flat.vmdk" fi path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` @@ -133,27 +133,16 @@ function upload_image { if [[ ! -f $FILES/$flat_fname || \ "$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then wget -c $flat_url -O $FILES/$flat_fname - if [[ $? 
-ne 0 ]]; then - echo "Flat disk not found: $flat_url" - flat_found=false - fi - fi - if $flat_found; then - IMAGE="$FILES/${flat_fname}" fi + IMAGE="$FILES/${flat_fname}" else IMAGE=$(echo $flat_url | sed "s/^file:\/\///g") if [[ ! -f $IMAGE || "$(stat -c "%s" $IMAGE)" == "0" ]]; then echo "Flat disk not found: $flat_url" - flat_found=false + return 1 fi - if ! $flat_found; then - IMAGE=$(echo $image_url | sed "s/^file:\/\///g") - fi - fi - if $flat_found; then - IMAGE_NAME="${flat_fname}" fi + IMAGE_NAME="${flat_fname}" vmdk_disktype="preallocated" elif [[ "$vmdk_create_type" = "streamOptimized" ]]; then vmdk_disktype="streamOptimized" @@ -163,33 +152,27 @@ function upload_image { if [[ ${IMAGE_NAME: -5} != "-flat" ]]; then warn $LINENO "Expected filename suffix: '-flat'."` `" Filename provided: ${IMAGE_NAME}" - fi - - descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk" - path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` - flat_path="${image_url:0:$path_len}" - descriptor_url=$flat_path$descriptor_fname - warn $LINENO "$descriptor_data_pair_msg"` - `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" - if [[ $flat_path != file* ]]; then - if [[ ! -f $FILES/$descriptor_fname || \ - "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then - wget -c $descriptor_url -O $FILES/$descriptor_fname - if [[ $? -ne 0 ]]; then - warn $LINENO "Descriptor not found $descriptor_url" - descriptor_found=false - fi - fi - descriptor_url="$FILES/$descriptor_fname" else - descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g") - if [[ ! 
-f $descriptor_url || \ - "$(stat -c "%s" $descriptor_url)" == "0" ]]; then - warn $LINENO "Descriptor not found $descriptor_url" - descriptor_found=false + descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk" + path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` + flat_path="${image_url:0:$path_len}" + descriptor_url=$flat_path$descriptor_fname + warn $LINENO "$descriptor_data_pair_msg"` + `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" + if [[ $flat_path != file* ]]; then + if [[ ! -f $FILES/$descriptor_fname || \ + "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then + wget -c $descriptor_url -O $FILES/$descriptor_fname + fi + descriptor_url="$FILES/$descriptor_fname" + else + descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g") + if [[ ! -f $descriptor_url || \ + "$(stat -c "%s" $descriptor_url)" == "0" ]]; then + echo "Descriptor not found: $descriptor_url" + return 1 + fi fi - fi - if $descriptor_found; then vmdk_adapter_type="$(head -25 $descriptor_url | { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })" vmdk_adapter_type="${vmdk_adapter_type#*\"}" vmdk_adapter_type="${vmdk_adapter_type%?}" From 7ff8443e46c94562822895b86b24122bc7474cfd Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Mon, 10 Mar 2014 20:04:51 +0400 Subject: [PATCH 0270/4119] Rename all Savanna usages to Sahara There are several backward compatibility nits. 
Change-Id: I93cac543375896602d158860cc557f86e41bcb63 --- exercises/{savanna.sh => sahara.sh} | 8 +- extras.d/70-sahara.sh | 37 ++++++ extras.d/70-savanna.sh | 37 ------ lib/sahara | 177 ++++++++++++++++++++++++++++ lib/sahara-dashboard | 72 +++++++++++ lib/savanna | 173 --------------------------- lib/savanna-dashboard | 72 ----------- 7 files changed, 290 insertions(+), 286 deletions(-) rename exercises/{savanna.sh => sahara.sh} (88%) create mode 100644 extras.d/70-sahara.sh delete mode 100644 extras.d/70-savanna.sh create mode 100644 lib/sahara create mode 100644 lib/sahara-dashboard delete mode 100644 lib/savanna delete mode 100644 lib/savanna-dashboard diff --git a/exercises/savanna.sh b/exercises/sahara.sh similarity index 88% rename from exercises/savanna.sh rename to exercises/sahara.sh index fc3f9760e5..867920ed31 100755 --- a/exercises/savanna.sh +++ b/exercises/sahara.sh @@ -1,8 +1,8 @@ #!/usr/bin/env bash -# **savanna.sh** +# **sahara.sh** -# Sanity check that Savanna started if enabled +# Sanity check that Sahara started if enabled echo "*********************************************************************" echo "Begin DevStack Exercise: $0" @@ -33,9 +33,9 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc -is_service_enabled savanna || exit 55 +is_service_enabled sahara || exit 55 -curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Savanna API not functioning!" +curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!" 
set +o xtrace echo "*********************************************************************" diff --git a/extras.d/70-sahara.sh b/extras.d/70-sahara.sh new file mode 100644 index 0000000000..80e07ff7b9 --- /dev/null +++ b/extras.d/70-sahara.sh @@ -0,0 +1,37 @@ +# sahara.sh - DevStack extras script to install Sahara + +if is_service_enabled sahara; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/sahara + source $TOP_DIR/lib/sahara-dashboard + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing sahara" + install_sahara + cleanup_sahara + if is_service_enabled horizon; then + install_sahara_dashboard + fi + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + echo_summary "Configuring sahara" + configure_sahara + create_sahara_accounts + if is_service_enabled horizon; then + configure_sahara_dashboard + fi + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + echo_summary "Initializing sahara" + start_sahara + fi + + if [[ "$1" == "unstack" ]]; then + stop_sahara + if is_service_enabled horizon; then + cleanup_sahara_dashboard + fi + fi + + if [[ "$1" == "clean" ]]; then + cleanup_sahara + fi +fi diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh deleted file mode 100644 index edc1376deb..0000000000 --- a/extras.d/70-savanna.sh +++ /dev/null @@ -1,37 +0,0 @@ -# savanna.sh - DevStack extras script to install Savanna - -if is_service_enabled savanna; then - if [[ "$1" == "source" ]]; then - # Initial source - source $TOP_DIR/lib/savanna - source $TOP_DIR/lib/savanna-dashboard - elif [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing Savanna" - install_savanna - cleanup_savanna - if is_service_enabled horizon; then - install_savanna_dashboard - fi - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring Savanna" - configure_savanna - create_savanna_accounts - if is_service_enabled horizon; then - configure_savanna_dashboard - fi - elif [[ 
"$1" == "stack" && "$2" == "extra" ]]; then - echo_summary "Initializing Savanna" - start_savanna - fi - - if [[ "$1" == "unstack" ]]; then - stop_savanna - if is_service_enabled horizon; then - cleanup_savanna_dashboard - fi - fi - - if [[ "$1" == "clean" ]]; then - cleanup_savanna - fi -fi diff --git a/lib/sahara b/lib/sahara new file mode 100644 index 0000000000..4cb04ecd3a --- /dev/null +++ b/lib/sahara @@ -0,0 +1,177 @@ +# lib/sahara + +# Dependencies: +# ``functions`` file +# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# install_sahara +# configure_sahara +# start_sahara +# stop_sahara +# cleanup_sahara + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default repos +SAHARA_REPO=${SAHARA_REPO:-${GIT_BASE}/openstack/sahara.git} +SAHARA_BRANCH=${SAHARA_BRANCH:-master} + +# Set up default directories +SAHARA_DIR=$DEST/sahara +SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara} +SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf +SAHARA_DEBUG=${SAHARA_DEBUG:-True} + +SAHARA_SERVICE_HOST=${SAHARA_SERVICE_HOST:-$SERVICE_HOST} +SAHARA_SERVICE_PORT=${SAHARA_SERVICE_PORT:-8386} +SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} + +SAHARA_AUTH_CACHE_DIR=${SAHARA_AUTH_CACHE_DIR:-/var/cache/sahara} + +# Support entry points installation of console scripts +if [[ -d $SAHARA_DIR/bin ]]; then + SAHARA_BIN_DIR=$SAHARA_DIR/bin +else + SAHARA_BIN_DIR=$(get_python_exec_prefix) +fi + +# Tell Tempest this project is present +TEMPEST_SERVICES+=,sahara + +# For backward compatibility with current tests in Tempest +TEMPEST_SERVICES+=,savanna + + +# Functions +# --------- + +# create_sahara_accounts() - Set up common required sahara accounts +# +# Tenant User Roles +# ------------------------------ +# service sahara admin +function create_sahara_accounts { + + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print 
\$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") + + SAHARA_USER=$(openstack user create \ + sahara \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email sahara@example.com \ + | grep " id " | get_field 2) + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $SAHARA_USER + + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + SAHARA_SERVICE=$(openstack service create \ + sahara \ + --type=data_processing \ + --description="Sahara Data Processing" \ + | grep " id " | get_field 2) + openstack endpoint create \ + $SAHARA_SERVICE \ + --region RegionOne \ + --publicurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ + --adminurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ + --internalurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" + fi +} + +# cleanup_sahara() - Remove residual data files, anything left over from +# previous runs that would need to clean up. +function cleanup_sahara { + + # Cleanup auth cache dir + sudo rm -rf $SAHARA_AUTH_CACHE_DIR +} + +# configure_sahara() - Set config files, create data dirs, etc +function configure_sahara { + + if [[ ! -d $SAHARA_CONF_DIR ]]; then + sudo mkdir -p $SAHARA_CONF_DIR + fi + sudo chown $STACK_USER $SAHARA_CONF_DIR + + # Copy over sahara configuration file and configure common parameters. 
+ # TODO(slukjanov): rename when sahara internals will be updated + cp $SAHARA_DIR/etc/savanna/savanna.conf.sample $SAHARA_CONF_FILE + + # Create auth cache dir + sudo mkdir -p $SAHARA_AUTH_CACHE_DIR + sudo chown $STACK_USER $SAHARA_AUTH_CACHE_DIR + rm -rf $SAHARA_AUTH_CACHE_DIR/* + + # Set obsolete keystone auth configs for backward compatibility + iniset $SAHARA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST + iniset $SAHARA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT + iniset $SAHARA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL + iniset $SAHARA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD + iniset $SAHARA_CONF_FILE DEFAULT os_admin_username sahara + iniset $SAHARA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME + + # Set actual keystone auth configs + iniset $SAHARA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + iniset $SAHARA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $SAHARA_CONF_FILE keystone_authtoken admin_user sahara + iniset $SAHARA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $SAHARA_CONF_FILE keystone_authtoken signing_dir $SAHARA_AUTH_CACHE_DIR + iniset $SAHARA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA + + iniset $SAHARA_CONF_FILE DEFAULT debug $SAHARA_DEBUG + + iniset $SAHARA_CONF_FILE database connection `database_connection_url sahara` + + if is_service_enabled neutron; then + iniset $SAHARA_CONF_FILE DEFAULT use_neutron true + iniset $SAHARA_CONF_FILE DEFAULT use_floating_ips true + fi + + if is_service_enabled heat; then + iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine heat + else + iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine direct + fi + + iniset $SAHARA_CONF_FILE DEFAULT use_syslog $SYSLOG + + recreate_database sahara utf8 + $SAHARA_BIN_DIR/sahara-db-manage --config-file $SAHARA_CONF_FILE upgrade head +} + +# install_sahara() - 
Collect source and prepare +function install_sahara { + git_clone $SAHARA_REPO $SAHARA_DIR $SAHARA_BRANCH + setup_develop $SAHARA_DIR +} + +# start_sahara() - Start running processes, including screen +function start_sahara { + screen_it sahara "cd $SAHARA_DIR && $SAHARA_BIN_DIR/sahara-api --config-file $SAHARA_CONF_FILE" +} + +# stop_sahara() - Stop running processes +function stop_sahara { + # Kill the Sahara screen windows + screen -S $SCREEN_NAME -p sahara -X kill +} + + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/sahara-dashboard b/lib/sahara-dashboard new file mode 100644 index 0000000000..a81df0f7a8 --- /dev/null +++ b/lib/sahara-dashboard @@ -0,0 +1,72 @@ +# lib/sahara-dashboard + +# Dependencies: +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# - ``SERVICE_HOST`` + +# ``stack.sh`` calls the entry points in this order: +# +# - install_sahara_dashboard +# - configure_sahara_dashboard +# - cleanup_sahara_dashboard + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/horizon + +# Defaults +# -------- + +# Set up default repos +SAHARA_DASHBOARD_REPO=${SAHARA_DASHBOARD_REPO:-${GIT_BASE}/openstack/sahara-dashboard.git} +SAHARA_DASHBOARD_BRANCH=${SAHARA_DASHBOARD_BRANCH:-master} + +SAHARA_PYTHONCLIENT_REPO=${SAHARA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-saharaclient.git} +SAHARA_PYTHONCLIENT_BRANCH=${SAHARA_PYTHONCLIENT_BRANCH:-master} + +# Set up default directories +SAHARA_DASHBOARD_DIR=$DEST/sahara-dashboard +SAHARA_PYTHONCLIENT_DIR=$DEST/python-saharaclient + +# Functions +# --------- + +function configure_sahara_dashboard { + + echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py + echo -e "HORIZON_CONFIG['dashboards'] += ('sahara',)" >> $HORIZON_DIR/openstack_dashboard/settings.py + echo -e "INSTALLED_APPS += ('saharadashboard',)" >> 
$HORIZON_DIR/openstack_dashboard/settings.py + + if is_service_enabled neutron; then + echo -e "SAHARA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py + fi +} + +# install_sahara_dashboard() - Collect source and prepare +function install_sahara_dashboard { + install_python_saharaclient + git_clone $SAHARA_DASHBOARD_REPO $SAHARA_DASHBOARD_DIR $SAHARA_DASHBOARD_BRANCH + setup_develop $SAHARA_DASHBOARD_DIR +} + +function install_python_saharaclient { + git_clone $SAHARA_PYTHONCLIENT_REPO $SAHARA_PYTHONCLIENT_DIR $SAHARA_PYTHONCLIENT_BRANCH + setup_develop $SAHARA_PYTHONCLIENT_DIR +} + +# Cleanup file settings.py from Sahara +function cleanup_sahara_dashboard { + sed -i '/sahara/d' $HORIZON_DIR/openstack_dashboard/settings.py +} + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: + diff --git a/lib/savanna b/lib/savanna deleted file mode 100644 index 2cb092c96c..0000000000 --- a/lib/savanna +++ /dev/null @@ -1,173 +0,0 @@ -# lib/savanna - -# Dependencies: -# ``functions`` file -# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined - -# ``stack.sh`` calls the entry points in this order: -# -# install_savanna -# configure_savanna -# start_savanna -# stop_savanna -# cleanup_savanna - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default repos -SAVANNA_REPO=${SAVANNA_REPO:-${GIT_BASE}/openstack/savanna.git} -SAVANNA_BRANCH=${SAVANNA_BRANCH:-master} - -# Set up default directories -SAVANNA_DIR=$DEST/savanna -SAVANNA_CONF_DIR=${SAVANNA_CONF_DIR:-/etc/savanna} -SAVANNA_CONF_FILE=${SAVANNA_CONF_DIR}/savanna.conf -SAVANNA_DEBUG=${SAVANNA_DEBUG:-True} - -SAVANNA_SERVICE_HOST=${SAVANNA_SERVICE_HOST:-$SERVICE_HOST} -SAVANNA_SERVICE_PORT=${SAVANNA_SERVICE_PORT:-8386} -SAVANNA_SERVICE_PROTOCOL=${SAVANNA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} - -SAVANNA_AUTH_CACHE_DIR=${SAVANNA_AUTH_CACHE_DIR:-/var/cache/savanna} - -# Support entry points installation of 
console scripts -if [[ -d $SAVANNA_DIR/bin ]]; then - SAVANNA_BIN_DIR=$SAVANNA_DIR/bin -else - SAVANNA_BIN_DIR=$(get_python_exec_prefix) -fi - -# Tell Tempest this project is present -TEMPEST_SERVICES+=,savanna - - -# Functions -# --------- - -# create_savanna_accounts() - Set up common required savanna accounts -# -# Tenant User Roles -# ------------------------------ -# service savanna admin -function create_savanna_accounts { - - SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") - - SAVANNA_USER=$(openstack user create \ - savanna \ - --password "$SERVICE_PASSWORD" \ - --project $SERVICE_TENANT \ - --email savanna@example.com \ - | grep " id " | get_field 2) - openstack role add \ - $ADMIN_ROLE \ - --project $SERVICE_TENANT \ - --user $SAVANNA_USER - - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - SAVANNA_SERVICE=$(openstack service create \ - savanna \ - --type=data_processing \ - --description="Savanna Data Processing" \ - | grep " id " | get_field 2) - openstack endpoint create \ - $SAVANNA_SERVICE \ - --region RegionOne \ - --publicurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ - --adminurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ - --internalurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" - fi -} - -# cleanup_savanna() - Remove residual data files, anything left over from -# previous runs that would need to clean up. -function cleanup_savanna { - - # Cleanup auth cache dir - sudo rm -rf $SAVANNA_AUTH_CACHE_DIR -} - -# configure_savanna() - Set config files, create data dirs, etc -function configure_savanna { - - if [[ ! 
-d $SAVANNA_CONF_DIR ]]; then - sudo mkdir -p $SAVANNA_CONF_DIR - fi - sudo chown $STACK_USER $SAVANNA_CONF_DIR - - # Copy over savanna configuration file and configure common parameters. - cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_FILE - - # Create auth cache dir - sudo mkdir -p $SAVANNA_AUTH_CACHE_DIR - sudo chown $STACK_USER $SAVANNA_AUTH_CACHE_DIR - rm -rf $SAVANNA_AUTH_CACHE_DIR/* - - # Set obsolete keystone auth configs for backward compatibility - iniset $SAVANNA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST - iniset $SAVANNA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT - iniset $SAVANNA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL - iniset $SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD - iniset $SAVANNA_CONF_FILE DEFAULT os_admin_username savanna - iniset $SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME - - # Set actual keystone auth configs - iniset $SAVANNA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ - iniset $SAVANNA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $SAVANNA_CONF_FILE keystone_authtoken admin_user savanna - iniset $SAVANNA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $SAVANNA_CONF_FILE keystone_authtoken signing_dir $SAVANNA_AUTH_CACHE_DIR - iniset $SAVANNA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA - - iniset $SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG - - iniset $SAVANNA_CONF_FILE database connection `database_connection_url savanna` - - if is_service_enabled neutron; then - iniset $SAVANNA_CONF_FILE DEFAULT use_neutron true - iniset $SAVANNA_CONF_FILE DEFAULT use_floating_ips true - fi - - if is_service_enabled heat; then - iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine heat - else - iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine savanna - fi - - iniset $SAVANNA_CONF_FILE DEFAULT use_syslog 
$SYSLOG - - recreate_database savanna utf8 - $SAVANNA_BIN_DIR/savanna-db-manage --config-file $SAVANNA_CONF_FILE upgrade head -} - -# install_savanna() - Collect source and prepare -function install_savanna { - git_clone $SAVANNA_REPO $SAVANNA_DIR $SAVANNA_BRANCH - setup_develop $SAVANNA_DIR -} - -# start_savanna() - Start running processes, including screen -function start_savanna { - screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api --config-file $SAVANNA_CONF_FILE" -} - -# stop_savanna() - Stop running processes -function stop_savanna { - # Kill the Savanna screen windows - screen -S $SCREEN_NAME -p savanna -X kill -} - - -# Restore xtrace -$XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard deleted file mode 100644 index 6fe15a3c81..0000000000 --- a/lib/savanna-dashboard +++ /dev/null @@ -1,72 +0,0 @@ -# lib/savanna-dashboard - -# Dependencies: -# -# - ``functions`` file -# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined -# - ``SERVICE_HOST`` - -# ``stack.sh`` calls the entry points in this order: -# -# - install_savanna_dashboard -# - configure_savanna_dashboard -# - cleanup_savanna_dashboard - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - -source $TOP_DIR/lib/horizon - -# Defaults -# -------- - -# Set up default repos -SAVANNA_DASHBOARD_REPO=${SAVANNA_DASHBOARD_REPO:-${GIT_BASE}/openstack/savanna-dashboard.git} -SAVANNA_DASHBOARD_BRANCH=${SAVANNA_DASHBOARD_BRANCH:-master} - -SAVANNA_PYTHONCLIENT_REPO=${SAVANNA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-savannaclient.git} -SAVANNA_PYTHONCLIENT_BRANCH=${SAVANNA_PYTHONCLIENT_BRANCH:-master} - -# Set up default directories -SAVANNA_DASHBOARD_DIR=$DEST/savanna-dashboard -SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient - -# Functions -# --------- - -function configure_savanna_dashboard { - - echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> 
$HORIZON_DIR/openstack_dashboard/local/local_settings.py - echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)" >> $HORIZON_DIR/openstack_dashboard/settings.py - echo -e "INSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py - - if is_service_enabled neutron; then - echo -e "SAVANNA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py - fi -} - -# install_savanna_dashboard() - Collect source and prepare -function install_savanna_dashboard { - install_python_savannaclient - git_clone $SAVANNA_DASHBOARD_REPO $SAVANNA_DASHBOARD_DIR $SAVANNA_DASHBOARD_BRANCH - setup_develop $SAVANNA_DASHBOARD_DIR -} - -function install_python_savannaclient { - git_clone $SAVANNA_PYTHONCLIENT_REPO $SAVANNA_PYTHONCLIENT_DIR $SAVANNA_PYTHONCLIENT_BRANCH - setup_develop $SAVANNA_PYTHONCLIENT_DIR -} - -# Cleanup file settings.py from Savanna -function cleanup_savanna_dashboard { - sed -i '/savanna/d' $HORIZON_DIR/openstack_dashboard/settings.py -} - -# Restore xtrace -$XTRACE - -# Local variables: -# mode: shell-script -# End: - From 51ebda6c8d37539473e463e8b24f27f21d798392 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Wed, 12 Mar 2014 22:26:12 +0400 Subject: [PATCH 0271/4119] Use sahara.conf.sample instead of old one Sahara internals was updated, now we can use correct conf sample. Change-Id: Ia8d99c2742785c3b5c724617a5dfc2880624a03f --- lib/sahara | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/sahara b/lib/sahara index 4cb04ecd3a..38b4ecd7e9 100644 --- a/lib/sahara +++ b/lib/sahara @@ -106,8 +106,7 @@ function configure_sahara { sudo chown $STACK_USER $SAHARA_CONF_DIR # Copy over sahara configuration file and configure common parameters. 
- # TODO(slukjanov): rename when sahara internals will be updated - cp $SAHARA_DIR/etc/savanna/savanna.conf.sample $SAHARA_CONF_FILE + cp $SAHARA_DIR/etc/sahara/sahara.conf.sample $SAHARA_CONF_FILE # Create auth cache dir sudo mkdir -p $SAHARA_AUTH_CACHE_DIR From 1a0c090057dde13fd3bb8ffcb84a923eb5952084 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 12 Mar 2014 14:59:50 -0500 Subject: [PATCH 0272/4119] Additional attempts to flush stdout/stderr The logfile output is piped through awk to apply a timestamp and filter out all of the xtrace commands in the xtrace output. A while back we added fflush("") which is supposed to flush all open output files and pipes. It appears that gawk in precise is old enough that it may only flush stdout, so explicitly flush the logfile handle. Change-Id: If5198c2da2a3278eed8ae3d50c7ca5c15eac6d94 --- stack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/stack.sh b/stack.sh index e76a55c534..a16046474c 100755 --- a/stack.sh +++ b/stack.sh @@ -541,6 +541,7 @@ if [[ -n "$LOGFILE" ]]; then print print > logfile fflush("") + fflush(logfile) }' ) 2>&1 # Set up a second fd for output exec 6> >( tee "${SUMFILE}" ) From 26c5a2252e9b99e053616d262fb627c1716a2e4d Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 12 Mar 2014 18:37:37 -0400 Subject: [PATCH 0273/4119] change the vmdk to the one used in VMWare ci The debian image that defaults to being used with vmware is huge, and it turns out it's not actually used in VMWare ci so we don't really know if it's working. Instead use the vmdk that is used in VMWare ci, which we know will boot, as we get results every day.
Change-Id: I014746af293852525e2bd128c4d19f5889ecd55d --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 6bb6f37195..cff1e26209 100644 --- a/stackrc +++ b/stackrc @@ -335,7 +335,7 @@ case "$VIRT_DRIVER" in ;; vsphere) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-debian-2.6.32-i686} - IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/debian-2.6.32-i686.vmdk"};; + IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/cirros-0.3.0-i386-disk.vmdk"};; xenserver) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-disk} IMAGE_URLS=${IMAGE_URLS:-"https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz"};; From 7eb99343979921993dc361f71b5efd77e9130f78 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 6 Feb 2014 10:33:40 +0100 Subject: [PATCH 0274/4119] Setup the correct ec2 manifest path setup correctly the path to the ec2 boundled images. Change-Id: If3bce845e009a73c6b685976de3fa6d44b907bed --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index c74f00d1ab..a3df45e81c 100644 --- a/lib/tempest +++ b/lib/tempest @@ -310,6 +310,9 @@ function configure_tempest { iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" iniset $TEMPEST_CONFIG boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}" iniset $TEMPEST_CONFIG boto s3_materials_path "$BOTO_MATERIALS_PATH" + iniset $TEMPEST_CONFIG boto ari_manifest cirros-0.3.1-x86_64-initrd.manifest.xml + iniset $TEMPEST_CONFIG boto ami_manifest cirros-0.3.1-x86_64-blank.img.manifest.xml + iniset $TEMPEST_CONFIG boto aki_manifest cirros-0.3.1-x86_64-vmlinuz.manifest.xml iniset $TEMPEST_CONFIG boto instance_type "$boto_instance_type" iniset $TEMPEST_CONFIG boto http_socket_timeout 30 iniset $TEMPEST_CONFIG boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} From 0f73ff2c516cb9fdb6849f7feb19cd0cfde46852 Mon Sep 17 00:00:00 2001 From: 
Adam Gandelman Date: Thu, 13 Mar 2014 14:20:43 -0700 Subject: [PATCH 0275/4119] Move libvirt install + setup to functions-libvirt Moves installation and setup of libvirt to a common functions-libvirt, which can be used by other drivers in the future that may require cross-distro libvirt installation and config but are not using VIRT_DRIVER=libvirt (ie, Ironic). Change-Id: I4a9255c8b4bacd5acfde9b8061c9e537aeea592c --- lib/nova_plugins/functions-libvirt | 125 ++++++++++++++++++++++++++++ lib/nova_plugins/hypervisor-libvirt | 99 +--------------------- 2 files changed, 128 insertions(+), 96 deletions(-) create mode 100644 lib/nova_plugins/functions-libvirt diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt new file mode 100644 index 0000000000..adffe010ee --- /dev/null +++ b/lib/nova_plugins/functions-libvirt @@ -0,0 +1,125 @@ +# lib/nova_plugins/functions-libvirt +# Common libvirt configuration functions + +# Dependencies: +# ``functions`` file +# ``STACK_USER`` has to be defined + +# Save trace setting +LV_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# ------- + +# if we should turn on massive libvirt debugging +DEBUG_LIBVIRT=$(trueorfalse False $DEBUG_LIBVIRT) + +# Installs required distro-specific libvirt packages. +function install_libvirt { + if is_ubuntu; then + install_package kvm + install_package libvirt-bin + install_package python-libvirt + install_package python-guestfs + elif is_fedora || is_suse; then + install_package kvm + install_package libvirt + install_package libvirt-python + install_package python-libguestfs + fi +} + +# Configures the installed libvirt system so that is accessible by +# STACK_USER via qemu:///system with management capabilities. +function configure_libvirt { + if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! 
sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then + # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces + cat </dev/null; then + sudo groupadd $LIBVIRT_GROUP + fi + add_user_to_group $STACK_USER $LIBVIRT_GROUP + + # Enable server side traces for libvirtd + if [[ "$DEBUG_LIBVIRT" = "True" ]] ; then + local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" + local log_outputs="1:file:/var/log/libvirt/libvirtd.log" + if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then + echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then + echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + fi + + # libvirt detects various settings on startup, as we potentially changed + # the system configuration (modules, filesystems), we need to restart + # libvirt to detect those changes. + restart_service $LIBVIRT_DAEMON +} + + +# Restore xtrace +$LV_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 5a51f33808..053df3cdf5 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -7,7 +7,6 @@ # Dependencies: # ``functions`` file # ``nova`` configuration -# ``STACK_USER`` has to be defined # install_nova_hypervisor - install any external requirements # configure_nova_hypervisor - make configuration changes, including those to other services @@ -19,14 +18,13 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace +source $TOP_DIR/lib/nova_plugins/functions-libvirt # Defaults # -------- # File injection is disabled by default in Nova. This will turn it back on. 
ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False} -# if we should turn on massive libvirt debugging -DEBUG_LIBVIRT=$(trueorfalse False $DEBUG_LIBVIRT) # Entry Points @@ -40,88 +38,7 @@ function cleanup_nova_hypervisor { # configure_nova_hypervisor - Set config files, create data dirs, etc function configure_nova_hypervisor { - if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then - # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces - cat </dev/null; then - sudo groupadd $LIBVIRT_GROUP - fi - add_user_to_group $STACK_USER $LIBVIRT_GROUP - - # Enable server side traces for libvirtd - if [[ "$DEBUG_LIBVIRT" = "True" ]] ; then - local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" - local log_outputs="1:file:/var/log/libvirt/libvirtd.log" - if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then - echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf - fi - if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then - echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf - fi - fi - - # libvirt detects various settings on startup, as we potentially changed - # the system configuration (modules, filesystems), we need to restart - # libvirt to detect those changes. 
- restart_service $LIBVIRT_DAEMON - + configure_libvirt iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" iniset $NOVA_CONF DEFAULT use_usb_tablet "False" @@ -150,17 +67,7 @@ EOF # install_nova_hypervisor() - Install external components function install_nova_hypervisor { - if is_ubuntu; then - install_package kvm - install_package libvirt-bin - install_package python-libvirt - install_package python-guestfs - elif is_fedora || is_suse; then - install_package kvm - install_package libvirt - install_package libvirt-python - install_package python-libguestfs - fi + install_libvirt # Install and configure **LXC** if specified. LXC is another approach to # splitting a system into many smaller parts. LXC uses cgroups and chroot From bbf759e9ed59b31258bcc8ba9fd3c79db9e57aee Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 13 Mar 2014 18:09:17 -0700 Subject: [PATCH 0276/4119] Only stop n-cpu in stop_nova_compute Move screen_stop n-cpu from stop_nova_rest to stop_nova_compute. Change-Id: I672673a55869d3f68e12c476924fc742e8260f39 --- lib/nova | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 360427d13e..b01d107525 100644 --- a/lib/nova +++ b/lib/nova @@ -716,6 +716,7 @@ function start_nova { } function stop_nova_compute { + screen_stop n-cpu if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then stop_nova_hypervisor fi @@ -725,7 +726,7 @@ function stop_nova_rest { # Kill the nova screen windows # Some services are listed here twice since more than one instance # of a service may be running in certain configs. 
- for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do + for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do screen_stop $serv done } From 1e94eb1a3f1c87670ff4720b89f25b95e0d15e07 Mon Sep 17 00:00:00 2001 From: Steve Martinelli Date: Thu, 13 Mar 2014 23:22:39 -0500 Subject: [PATCH 0277/4119] Move from keystoneclient to openstackclient in eucarc Updating an ec2 create command to openstackclient syntax. Change-Id: I3dd21ddd52b77f3af76988db9ae6b863427d9106 --- eucarc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eucarc b/eucarc index 350235106c..343f4ccde2 100644 --- a/eucarc +++ b/eucarc @@ -22,7 +22,7 @@ source $RC_DIR/openrc export EC2_URL=$(keystone catalog --service ec2 | awk '/ publicURL / { print $4 }') # Create EC2 credentials for the current user -CREDS=$(keystone ec2-credentials-create) +CREDS=$(openstack ec2 credentials create) export EC2_ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') export EC2_SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') From 2f6c30b33c074a03748b7c0273c49fe81ab96607 Mon Sep 17 00:00:00 2001 From: Steve Martinelli Date: Thu, 13 Mar 2014 23:32:46 -0500 Subject: [PATCH 0278/4119] Update client-env to use openstackclient commands Updated the only instance of a keystoneclient command, to check if the identity service is enabled. 
Change-Id: If86f71c1610a79690d6c6a8eb423b6fa234372bb --- exercises/client-env.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/client-env.sh b/exercises/client-env.sh index d955e4d1e1..4e8259cd06 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -64,7 +64,7 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then STATUS_KEYSTONE="Skipped" else echo -e "\nTest Keystone" - if keystone catalog --service identity; then + if openstack endpoint show identity; then STATUS_KEYSTONE="Succeeded" else STATUS_KEYSTONE="Failed" From 4376ae04df50fb9b338039b02a94fea351cedb28 Mon Sep 17 00:00:00 2001 From: Tiago Mello Date: Fri, 14 Mar 2014 10:48:56 -0300 Subject: [PATCH 0279/4119] Clean /etc/mysql when calling clean.sh The clean.sh script should also remove the /etc/mysql directory. It contains information from the old devstack installation and may conflict with the further one. apt-get purge does not remove it since the directory is not empty. Change-Id: I885345a2311851d8746abe42e44300ecd4f6e08a --- lib/databases/mysql | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/databases/mysql b/lib/databases/mysql index f5ee3c0ed0..7a0145ae1b 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -23,6 +23,7 @@ function cleanup_database_mysql { stop_service $MYSQL apt_get purge -y mysql* sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql return elif is_fedora; then if [[ $DISTRO =~ (rhel7) ]]; then From 0b03e7acb84e14efed3bfc2b30055a8427a40a12 Mon Sep 17 00:00:00 2001 From: Rafael Folco Date: Fri, 14 Mar 2014 11:14:57 -0300 Subject: [PATCH 0280/4119] Set correct default disk bus back to virtio on ppc64 virtio is supported and should be the default disk bus on Power to take advantage of I/O performance drivers. This aligns with Nova default bus values on PowerKVM. SCSI is the default for cdrom. 
Change-Id: I5de08c90359b3a500c352c09c07b6b082ddb4325 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 1d30922916..e439ef6dbe 100644 --- a/functions +++ b/functions @@ -290,7 +290,7 @@ function upload_image { esac if is_arch "ppc64"; then - IMG_PROPERTY="--property hw_disk_bus=scsi --property hw_cdrom_bus=scsi" + IMG_PROPERTY="--property hw_cdrom_bus=scsi" fi if [ "$CONTAINER_FORMAT" = "bare" ]; then From 846609b627bff979ce767dd9ad00daa46a150342 Mon Sep 17 00:00:00 2001 From: Piyush Masrani Date: Fri, 14 Mar 2014 19:21:48 +0530 Subject: [PATCH 0281/4119] Devstack changes to ceilometer to support vsphere Ceilometer currently supports only libvirt when installed using devstack. Have extended this support to Vmware Vsphere in this changelist. Change-Id: I98c64204973bca5e6a7f859a5431adb2b661277f --- lib/ceilometer | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/ceilometer b/lib/ceilometer index b0899e2f24..abf4629b5e 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -161,6 +161,13 @@ function configure_ceilometer { configure_mongodb cleanup_ceilometer fi + + if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then + iniset $CEILOMETER_CONF DEFAULT hypervisor_inspector vsphere + iniset $CEILOMETER_CONF vmware host_ip "$VMWAREAPI_IP" + iniset $CEILOMETER_CONF vmware host_username "$VMWAREAPI_USER" + iniset $CEILOMETER_CONF vmware host_password "$VMWAREAPI_PASSWORD" + fi } function configure_mongodb { @@ -204,6 +211,9 @@ function start_ceilometer { if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" fi + if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then + screen_it ceilometer-acompute "cd ; ceilometer-agent-compute --config-file $CEILOMETER_CONF" + fi screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF" screen_it ceilometer-anotification "cd ; ceilometer-agent-notification 
--config-file $CEILOMETER_CONF" screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF" From 380587bde6444edcc8c0b3adad250de70b27ad33 Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Fri, 14 Mar 2014 12:22:18 -0400 Subject: [PATCH 0282/4119] Rollback workaround for Marconi This patch rolls back the stderr redirection in Marconi. Change-Id: Iaa2d897295cf2bc2e4a8c370d3e0592def337c78 --- lib/marconi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index 3c4547fc75..fd1c35132a 100644 --- a/lib/marconi +++ b/lib/marconi @@ -154,7 +154,7 @@ function install_marconiclient { # start_marconi() - Start running processes, including screen function start_marconi { - screen_it marconi-server "marconi-server --config-file $MARCONI_CONF 2>&1" + screen_it marconi-server "marconi-server --config-file $MARCONI_CONF" echo "Waiting for Marconi to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then die $LINENO "Marconi did not start" From 29870cce3214766ecc208d0bb404724cf232ad69 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 14 Mar 2014 14:32:01 -0400 Subject: [PATCH 0283/4119] add is_heat_enabled this is missing, and the code assumes "heat" to be in the enabled services list otherwise.
Change-Id: Ib0a7db04d8e38b58aca48261308e7c4d1fd43972 --- lib/heat | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/heat b/lib/heat index 2d9d863f0c..902333e29a 100644 --- a/lib/heat +++ b/lib/heat @@ -45,6 +45,13 @@ TEMPEST_SERVICES+=,heat # Functions # --------- +# Test if any Heat services are enabled +# is_heat_enabled +function is_heat_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"h-" ]] && return 0 + return 1 +} + # cleanup_heat() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_heat { From 06fb29c66124b6c753fdd262eb262043b4551298 Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Fri, 31 Jan 2014 18:02:07 +0400 Subject: [PATCH 0284/4119] Integration testing preparation for Ironic Add ability to create/register qemu vms for Ironic testing purposes Implements bp:deprecate-baremetal-driver Change-Id: If452438fcc0ff562531b33a36cd189b235654b48 --- extras.d/50-ironic.sh | 7 + files/apts/ironic | 10 + files/rpms/ironic | 9 + lib/baremetal | 7 +- lib/ironic | 265 ++++++++++++++++++- lib/nova_plugins/hypervisor-ironic | 75 ++++++ stackrc | 2 +- tools/install_prereqs.sh | 8 +- tools/ironic/scripts/cleanup-nodes | 25 ++ tools/ironic/scripts/configure-vm | 78 ++++++ tools/ironic/scripts/create-nodes | 68 +++++ tools/ironic/scripts/setup-network | 24 ++ tools/ironic/templates/brbm.xml | 6 + tools/ironic/templates/tftpd-xinetd.template | 11 + tools/ironic/templates/vm.xml | 43 +++ 15 files changed, 630 insertions(+), 8 deletions(-) create mode 100644 files/apts/ironic create mode 100644 files/rpms/ironic create mode 100644 lib/nova_plugins/hypervisor-ironic create mode 100755 tools/ironic/scripts/cleanup-nodes create mode 100755 tools/ironic/scripts/configure-vm create mode 100755 tools/ironic/scripts/create-nodes create mode 100755 tools/ironic/scripts/setup-network create mode 100644 tools/ironic/templates/brbm.xml create mode 100644 tools/ironic/templates/tftpd-xinetd.template 
create mode 100644 tools/ironic/templates/vm.xml diff --git a/extras.d/50-ironic.sh b/extras.d/50-ironic.sh index 9e61dc5d78..3b8e3d5045 100644 --- a/extras.d/50-ironic.sh +++ b/extras.d/50-ironic.sh @@ -24,10 +24,17 @@ if is_service_enabled ir-api ir-cond; then # Start the ironic API and ironic taskmgr components echo_summary "Starting Ironic" start_ironic + + if [[ "$IRONIC_BAREMETAL_BASIC_OPS" = "True" ]]; then + prepare_baremetal_basic_ops + fi fi if [[ "$1" == "unstack" ]]; then stop_ironic + if [[ "$IRONIC_BAREMETAL_BASIC_OPS" = "True" ]]; then + cleanup_baremetal_basic_ops + fi fi if [[ "$1" == "clean" ]]; then diff --git a/files/apts/ironic b/files/apts/ironic new file mode 100644 index 0000000000..a749ad762e --- /dev/null +++ b/files/apts/ironic @@ -0,0 +1,10 @@ +libguestfs0 +libvirt-bin +openssh-client +openvswitch-switch +openvswitch-datapath-dkms +python-libguestfs +python-libvirt +syslinux +tftpd-hpa +xinetd diff --git a/files/rpms/ironic b/files/rpms/ironic new file mode 100644 index 0000000000..54b98299ee --- /dev/null +++ b/files/rpms/ironic @@ -0,0 +1,9 @@ +libguestfs +libvirt +libvirt-python +openssh-clients +openvswitch +python-libguestfs +syslinux +tftp-server +xinetd diff --git a/lib/baremetal b/lib/baremetal index 1d02e1e417..eda92f97cb 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -140,7 +140,10 @@ BM_DEPLOY_KERNEL=${BM_DEPLOY_KERNEL:-} # If you need to add any extra flavors to the deploy ramdisk image # eg, specific network drivers, specify them here -BM_DEPLOY_FLAVOR=${BM_DEPLOY_FLAVOR:-} +# +# NOTE(deva): this will be moved to lib/ironic in a future patch +# for now, set the default to a suitable value for Ironic's needs +BM_DEPLOY_FLAVOR=${BM_DEPLOY_FLAVOR:--a amd64 ubuntu deploy-ironic} # set URL and version for google shell-in-a-box BM_SHELL_IN_A_BOX=${BM_SHELL_IN_A_BOX:-http://shellinabox.googlecode.com/files/shellinabox-2.14.tar.gz} @@ -220,7 +223,7 @@ function upload_baremetal_deploy { BM_DEPLOY_KERNEL=bm-deploy.kernel 
BM_DEPLOY_RAMDISK=bm-deploy.initramfs if [ ! -e "$TOP_DIR/files/$BM_DEPLOY_KERNEL" -o ! -e "$TOP_DIR/files/$BM_DEPLOY_RAMDISK" ]; then - $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR deploy \ + $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR \ -o $TOP_DIR/files/bm-deploy fi fi diff --git a/lib/ironic b/lib/ironic index b346de1e69..c6fa563e6a 100644 --- a/lib/ironic +++ b/lib/ironic @@ -18,16 +18,19 @@ # - stop_ironic # - cleanup_ironic -# Save trace setting +# Save trace and pipefail settings XTRACE=$(set +o | grep xtrace) +PIPEFAIL=$(set +o | grep pipefail) set +o xtrace - +set +o pipefail # Defaults # -------- # Set up default directories IRONIC_DIR=$DEST/ironic +IRONIC_DATA_DIR=$DATA_DIR/ironic +IRONIC_STATE_PATH=/var/lib/ironic IRONICCLIENT_DIR=$DEST/python-ironicclient IRONIC_AUTH_CACHE_DIR=${IRONIC_AUTH_CACHE_DIR:-/var/cache/ironic} IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic} @@ -35,6 +38,28 @@ IRONIC_CONF_FILE=$IRONIC_CONF_DIR/ironic.conf IRONIC_ROOTWRAP_CONF=$IRONIC_CONF_DIR/rootwrap.conf IRONIC_POLICY_JSON=$IRONIC_CONF_DIR/policy.json +# Set up defaults for functional / integration testing +IRONIC_SCRIPTS_DIR=${IRONIC_SCRIPTS_DIR:-$TOP_DIR/tools/ironic/scripts} +IRONIC_TEMPLATES_DIR=${IRONIC_TEMPLATES_DIR:-$TOP_DIR/tools/ironic/templates} +IRONIC_BAREMETAL_BASIC_OPS=$(trueorfalse False $IRONIC_BAREMETAL_BASIC_OPS) +IRONIC_SSH_USERNAME=${IRONIC_SSH_USERNAME:-`whoami`} +IRONIC_SSH_KEY_DIR=${IRONIC_SSH_KEY_DIR:-$IRONIC_DATA_DIR/ssh_keys} +IRONIC_SSH_KEY_FILENAME=${IRONIC_SSH_KEY_FILENAME:-ironic_key} +IRONIC_KEY_FILE=$IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME +IRONIC_SSH_VIRT_TYPE=${IRONIC_SSH_VIRT_TYPE:-virsh} +IRONIC_TFTPBOOT_DIR=${IRONIC_TFTPBOOT_DIR:-$IRONIC_DATA_DIR/tftpboot} +IRONIC_VM_SSH_PORT=${IRONIC_VM_SSH_PORT:-2222} +IRONIC_VM_SSH_ADDRESS=${IRONIC_VM_SSH_ADDRESS:-$HOST_IP} +IRONIC_VM_COUNT=${IRONIC_VM_COUNT:-1} +IRONIC_VM_SPECS_CPU=${IRONIC_VM_SPECS_CPU:-1} +IRONIC_VM_SPECS_RAM=${IRONIC_VM_SPECS_RAM:-256} 
+IRONIC_VM_SPECS_DISK=${IRONIC_VM_SPECS_DISK:-10} +IRONIC_VM_EMULATOR=${IRONIC_VM_EMULATOR:-/usr/bin/qemu-system-x86_64} +IRONIC_VM_NETWORK_BRIDGE=${IRONIC_VM_NETWORK_BRIDGE:-brbm} +IRONIC_VM_NETWORK_RANGE=${IRONIC_VM_NETWORK_RANGE:-192.0.2.0/24} +IRONIC_VM_MACS_CSV_FILE=${IRONIC_VM_MACS_CSV_FILE:-$IRONIC_DATA_DIR/ironic_macs.csv} +IRONIC_AUTHORIZED_KEYS_FILE=${IRONIC_AUTHORIZED_KEYS_FILE:-$HOME/.ssh/authorized_keys} + # Support entry points installation of console scripts IRONIC_BIN_DIR=$(get_python_exec_prefix) @@ -86,8 +111,8 @@ function configure_ironic { iniset $IRONIC_CONF_FILE DEFAULT debug True inicomment $IRONIC_CONF_FILE DEFAULT log_file iniset $IRONIC_CONF_FILE DEFAULT sql_connection `database_connection_url ironic` + iniset $IRONIC_CONF_FILE DEFAULT state_path $IRONIC_STATE_PATH iniset $IRONIC_CONF_FILE DEFAULT use_syslog $SYSLOG - # Configure Ironic conductor, if it was enabled. if is_service_enabled ir-cond; then configure_ironic_conductor @@ -97,6 +122,10 @@ function configure_ironic { if is_service_enabled ir-api; then configure_ironic_api fi + + if [[ "$IRONIC_BAREMETAL_BASIC_OPS" == "True" ]]; then + configure_ironic_auxiliary + fi } # configure_ironic_api() - Is used by configure_ironic(). 
Performs @@ -125,6 +154,10 @@ function configure_ironic_conductor { cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR iniset $IRONIC_CONF_FILE DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF + iniset $IRONIC_CONF_FILE conductor api_url http://$SERVICE_HOST:6385 + iniset $IRONIC_CONF_FILE pxe tftp_server $SERVICE_HOST + iniset $IRONIC_CONF_FILE pxe tftp_root $IRONIC_TFTPBOOT_DIR + iniset $IRONIC_CONF_FILE pxe tftp_master_path $IRONIC_TFTPBOOT_DIR/master_images } # create_ironic_cache_dir() - Part of the init_ironic() process @@ -225,9 +258,233 @@ function stop_ironic { screen -S $SCREEN_NAME -p ir-cond -X kill } +function is_ironic { + if ( is_service_enabled ir-cond && is_service_enabled ir-api ); then + return 0 + fi + return 1 +} + +function configure_ironic_dirs { + sudo mkdir -p $IRONIC_DATA_DIR + sudo mkdir -p $IRONIC_STATE_PATH + sudo mkdir -p $IRONIC_TFTPBOOT_DIR + sudo chown -R $STACK_USER $IRONIC_DATA_DIR $IRONIC_STATE_PATH + sudo chown -R $STACK_USER:$LIBVIRT_GROUP $IRONIC_TFTPBOOT_DIR + if is_ubuntu; then + PXEBIN=/usr/lib/syslinux/pxelinux.0 + elif is_fedora; then + PXEBIN=/usr/share/syslinux/pxelinux.0 + fi + if [ ! -f $PXEBIN ]; then + die $LINENO "pxelinux.0 (from SYSLINUX) not found." 
+ fi + + cp $PXEBIN $IRONIC_TFTPBOOT_DIR + mkdir -p $IRONIC_TFTPBOOT_DIR/pxelinux.cfg +} + +function ironic_ensure_libvirt_group { + groups $STACK_USER | grep -q $LIBVIRT_GROUP || adduser $STACK_USER $LIBVIRT_GROUP +} + +function create_bridge_and_vms { + ironic_ensure_libvirt_group + + # Call libvirt setup scripts in a new shell to ensure any new group membership + sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/setup-network" + + sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/create-nodes \ + $IRONIC_VM_SPECS_CPU $IRONIC_VM_SPECS_RAM $IRONIC_VM_SPECS_DISK \ + amd64 $IRONIC_VM_COUNT $IRONIC_VM_NETWORK_BRIDGE $IRONIC_VM_EMULATOR" >> $IRONIC_VM_MACS_CSV_FILE + +} + +function enroll_vms { + + CHASSIS_ID=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2) + IRONIC_NET_ID=$(neutron net-list | grep private | get_field 1) + local idx=0 + + # work around; need to know what netns neutron uses for private network + neutron port-create private + + while read MAC; do + + NODE_ID=$(ironic node-create --chassis_uuid $CHASSIS_ID --driver pxe_ssh \ + -i ssh_virt_type=$IRONIC_SSH_VIRT_TYPE \ + -i ssh_address=$IRONIC_VM_SSH_ADDRESS \ + -i ssh_port=$IRONIC_VM_SSH_PORT \ + -i ssh_username=$IRONIC_SSH_USERNAME \ + -i ssh_key_filename=$IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME \ + -p cpus=$IRONIC_VM_SPECS_CPU \ + -p memory_mb=$IRONIC_VM_SPECS_RAM \ + -p local_gb=$IRONIC_VM_SPECS_DISK \ + -p cpu_arch=x86_64 \ + | grep " uuid " | get_field 2) + + ironic port-create --address $MAC --node_uuid $NODE_ID + + idx=$((idx+1)) + + done < $IRONIC_VM_MACS_CSV_FILE + + # create the nova flavor + nova flavor-create baremetal auto $IRONIC_VM_SPECS_RAM $IRONIC_VM_SPECS_DISK $IRONIC_VM_SPECS_CPU + nova flavor-key baremetal set "cpu_arch"="x86_64" "baremetal:deploy_kernel_id"="$BM_DEPLOY_KERNEL_ID" "baremetal:deploy_ramdisk_id"="$BM_DEPLOY_RAMDISK_ID" + + # intentional sleep to make sure the tag has been set to port + sleep 10 + TAPDEV=$(sudo ip netns exec qdhcp-${IRONIC_NET_ID} 
ip link list | grep tap | cut -d':' -f2 | cut -b2-) + TAG_ID=$(sudo ovs-vsctl show |grep ${TAPDEV} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-) + + # make sure veth pair is not existing, otherwise delete its links + sudo ip link show ovs-tap1 && sudo ip link delete ovs-tap1 + sudo ip link show brbm-tap1 && sudo ip link delete brbm-tap1 + # create veth pair for future interconnection between br-int and brbm + sudo ip link add brbm-tap1 type veth peer name ovs-tap1 + sudo ip link set dev brbm-tap1 up + sudo ip link set dev ovs-tap1 up + + sudo ovs-vsctl -- --if-exists del-port ovs-tap1 -- add-port br-int ovs-tap1 tag=$TAG_ID + sudo ovs-vsctl -- --if-exists del-port brbm-tap1 -- add-port $IRONIC_VM_NETWORK_BRIDGE brbm-tap1 +} + +function configure_tftpd { + # enable tftp natting for allowing connections to SERVICE_HOST's tftp server + sudo modprobe nf_conntrack_tftp + sudo modprobe nf_nat_tftp + + if is_ubuntu; then + PXEBIN=/usr/lib/syslinux/pxelinux.0 + elif is_fedora; then + PXEBIN=/usr/share/syslinux/pxelinux.0 + fi + if [ ! -f $PXEBIN ]; then + die $LINENO "pxelinux.0 (from SYSLINUX) not found." + fi + + # stop tftpd and setup serving via xinetd + stop_service tftpd-hpa || true + [ -f /etc/init/tftpd-hpa.conf ] && echo "manual" | sudo tee /etc/init/tftpd-hpa.override + sudo cp $IRONIC_TEMPLATES_DIR/tftpd-xinetd.template /etc/xinetd.d/tftp + sudo sed -e "s|%TFTPBOOT_DIR%|$IRONIC_TFTPBOOT_DIR|g" -i /etc/xinetd.d/tftp + + # setup tftp file mapping to satisfy requests at the root (booting) and + # /tftpboot/ sub-dir (as per deploy-ironic elements) + echo "r ^([^/]) $IRONIC_TFTPBOOT_DIR/\1" >$IRONIC_TFTPBOOT_DIR/map-file + echo "r ^(/tftpboot/) $IRONIC_TFTPBOOT_DIR/\2" >>$IRONIC_TFTPBOOT_DIR/map-file + + chmod -R 0755 $IRONIC_TFTPBOOT_DIR + restart_service xinetd +} + +function configure_ironic_ssh_keypair { + # Generating ssh key pair for stack user + if [[ ! -d $IRONIC_SSH_KEY_DIR ]]; then + mkdir -p $IRONIC_SSH_KEY_DIR + fi + if [[ ! 
-d $HOME/.ssh ]]; then + mkdir -p $HOME/.ssh + chmod 700 $HOME/.ssh + fi + echo -e 'n\n' | ssh-keygen -q -t rsa -P '' -f $IRONIC_KEY_FILE + cat $IRONIC_KEY_FILE.pub | tee -a $IRONIC_AUTHORIZED_KEYS_FILE +} + +function ironic_ssh_check { + local KEY_FILE=$1 + local FLOATING_IP=$2 + local PORT=$3 + local DEFAULT_INSTANCE_USER=$4 + local ACTIVE_TIMEOUT=$5 + if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -p $PORT -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success; do sleep 1; done"; then + die $LINENO "server didn't become ssh-able!" + fi +} + +function configure_ironic_sshd { + # Ensure sshd server accepts connections from localhost only + + SSH_CONFIG=/etc/ssh/sshd_config + HOST_PORT=$IRONIC_VM_SSH_ADDRESS:$IRONIC_VM_SSH_PORT + if ! sudo grep ListenAddress $SSH_CONFIG | grep $HOST_PORT; then + echo "ListenAddress $HOST_PORT" | sudo tee -a $SSH_CONFIG + fi + + SSH_SERVICE_NAME=sshd + if is_ubuntu; then + SSH_SERVICE_NAME=ssh + fi + + restart_service $SSH_SERVICE_NAME + # to ensure ssh service is up and running + sleep 3 + ironic_ssh_check $IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME $IRONIC_VM_SSH_ADDRESS $IRONIC_VM_SSH_PORT $IRONIC_SSH_USERNAME 10 + +} + +function configure_ironic_auxiliary { + configure_ironic_dirs + configure_ironic_ssh_keypair + configure_ironic_sshd +} + +function prepare_baremetal_basic_ops { + + # install diskimage-builder + git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH + + # make sure all needed service were enabled + for srv in nova glance key neutron; do + if ! 
is_service_enabled "$srv"; then + die $LINENO "$srv should be enabled for ironic tests" + fi + done + + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + + # stop all nova services + stop_nova || true + + # remove any nova services failure status + find $SERVICE_DIR/$SCREEN_NAME -name 'n-*.failure' -exec rm -f '{}' \; + + # start them again + start_nova_api + start_nova + + TOKEN=$(keystone token-get | grep ' id ' | get_field 2) + die_if_not_set $LINENO TOKEN "Keystone fail to get token" + + echo_summary "Creating and uploading baremetal images for ironic" + + # build and upload separate deploy kernel & ramdisk + upload_baremetal_deploy $TOKEN + + create_bridge_and_vms + enroll_vms + configure_tftpd +} + +function cleanup_baremetal_basic_ops { + rm -f $IRONIC_VM_MACS_CSV_FILE + if [ -f $IRONIC_KEY_FILE ]; then + KEY=`cat $IRONIC_KEY_FILE.pub` + # remove public key from authorized_keys + grep -v "$KEY" $IRONIC_AUTHORIZED_KEYS_FILE > temp && mv temp $IRONIC_AUTHORIZED_KEYS_FILE + chmod 0600 $IRONIC_AUTHORIZED_KEYS_FILE + fi + sudo rm -rf $IRONIC_DATA_DIR $IRONIC_STATE_PATH + sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/cleanup-nodes $IRONIC_VM_COUNT $IRONIC_VM_NETWORK_BRIDGE" + sudo rm -rf /etc/xinetd.d/tftp /etc/init/tftpd-hpa.override + restart_service xinetd +} -# Restore xtrace +# Restore xtrace + pipefail $XTRACE +$PIPEFAIL # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic new file mode 100644 index 0000000000..5af7c0b292 --- /dev/null +++ b/lib/nova_plugins/hypervisor-ironic @@ -0,0 +1,75 @@ +# lib/nova_plugins/hypervisor-ironic +# Configure the ironic hypervisor + +# Enable with: +# VIRT_DRIVER=ironic + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# 
start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor { + # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor { + iniset $NOVA_CONF ironic sql_connection `database_connection_url nova_bm` + LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"} + iniset $NOVA_CONF DEFAULT compute_driver ironic.nova.virt.ironic.IronicDriver + iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER + iniset $NOVA_CONF DEFAULT scheduler_host_manager ironic.nova.scheduler.ironic_host_manager.IronicHostManager + iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0 + iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0 + # ironic section + iniset $NOVA_CONF ironic admin_username admin + iniset $NOVA_CONF ironic admin_password $ADMIN_PASSWORD + iniset $NOVA_CONF ironic admin_url $KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 + iniset $NOVA_CONF ironic admin_tenant_name demo + iniset $NOVA_CONF ironic api_endpoint http://$SERVICE_HOST:6358/v1 +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor { + # This function intentionally left blank + : +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor { + # This function intentionally left blank + : +} + + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stackrc b/stackrc 
index 456637854b..4a997bf77c 100644 --- a/stackrc +++ b/stackrc @@ -267,7 +267,7 @@ DEFAULT_VIRT_DRIVER=libvirt is_package_installed xenserver-core && DEFAULT_VIRT_DRIVER=xenserver VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER} case "$VIRT_DRIVER" in - libvirt) + ironic|libvirt) LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} if [[ "$os_VENDOR" =~ (Debian) ]]; then LIBVIRT_GROUP=libvirt diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index 0c65fd9b00..9651083cb3 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -55,7 +55,13 @@ export_proxy_variables # ================ # Install package requirements -install_package $(get_packages general $ENABLED_SERVICES) +PACKAGES=$(get_packages general $ENABLED_SERVICES) +if is_ubuntu && echo $PACKAGES | grep -q dkms ; then + # ensure headers for the running kernel are installed for any DKMS builds + PACKAGES="$PACKAGES linux-headers-$(uname -r)" +fi + +install_package $PACKAGES if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then if is_ubuntu || is_fedora; then diff --git a/tools/ironic/scripts/cleanup-nodes b/tools/ironic/scripts/cleanup-nodes new file mode 100755 index 0000000000..dc5a19d1cd --- /dev/null +++ b/tools/ironic/scripts/cleanup-nodes @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +# **cleanup-nodes** + +# Cleans up baremetal poseur nodes and volumes created during ironic setup +# Assumes calling user has proper libvirt group membership and access. 
+ +set -exu + +LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"} + +VM_COUNT=$1 +NETWORK_BRIDGE=$2 + +for (( idx=0; idx<$VM_COUNT; idx++ )); do + NAME="baremetal${NETWORK_BRIDGE}_${idx}" + VOL_NAME="baremetal${NETWORK_BRIDGE}-${idx}.qcow2" + virsh list | grep -q $NAME && virsh destroy $NAME + virsh list --inactive | grep -q $NAME && virsh undefine $NAME + + if virsh pool-list | grep -q $LIBVIRT_STORAGE_POOL ; then + virsh vol-list $LIBVIRT_STORAGE_POOL | grep -q $VOL_NAME && + virsh vol-delete $VOL_NAME --pool $LIBVIRT_STORAGE_POOL + fi +done diff --git a/tools/ironic/scripts/configure-vm b/tools/ironic/scripts/configure-vm new file mode 100755 index 0000000000..9936b76c4f --- /dev/null +++ b/tools/ironic/scripts/configure-vm @@ -0,0 +1,78 @@ +#!/usr/bin/env python + +import argparse +import os.path + +import libvirt + +templatedir = os.path.join(os.path.dirname(os.path.dirname(__file__)), + 'templates') + + +def main(): + parser = argparse.ArgumentParser( + description="Configure a kvm virtual machine for the seed image.") + parser.add_argument('--name', default='seed', + help='the name to give the machine in libvirt.') + parser.add_argument('--image', + help='Use a custom image file (must be qcow2).') + parser.add_argument('--engine', default='qemu', + help='The virtualization engine to use') + parser.add_argument('--arch', default='i686', + help='The architecture to use') + parser.add_argument('--memory', default='2097152', + help="Maximum memory for the VM in KB.") + parser.add_argument('--cpus', default='1', + help="CPU count for the VM.") + parser.add_argument('--bootdev', default='hd', + help="What boot device to use (hd/network).") + parser.add_argument('--network', default="brbm", + help='The libvirt network name to use') + parser.add_argument('--libvirt-nic-driver', default='e1000', + help='The libvirt network driver to use') + parser.add_argument('--emulator', default=None, + help='Path to emulator bin for vm template') + args = parser.parse_args() 
+ with file(templatedir + '/vm.xml', 'rb') as f: + source_template = f.read() + params = { + 'name': args.name, + 'imagefile': args.image, + 'engine': args.engine, + 'arch': args.arch, + 'memory': args.memory, + 'cpus': args.cpus, + 'bootdev': args.bootdev, + 'network': args.network, + 'emulator': args.emulator, + } + + if args.emulator: + params['emulator'] = args.emulator + else: + if os.path.exists("/usr/bin/kvm"): # Debian + params['emulator'] = "/usr/bin/kvm" + elif os.path.exists("/usr/bin/qemu-kvm"): # Redhat + params['emulator'] = "/usr/bin/qemu-kvm" + + nicparams = { + 'nicdriver': args.libvirt_nic_driver, + 'network': args.network, + } + + params['bm_network'] = """ + + + + + +
+""" % nicparams + + libvirt_template = source_template % params + conn = libvirt.open("qemu:///system") + a = conn.defineXML(libvirt_template) + print ("Created machine %s with UUID %s" % (args.name, a.UUIDString())) + +if __name__ == '__main__': + main() diff --git a/tools/ironic/scripts/create-nodes b/tools/ironic/scripts/create-nodes new file mode 100755 index 0000000000..3232b50776 --- /dev/null +++ b/tools/ironic/scripts/create-nodes @@ -0,0 +1,68 @@ +#!/usr/bin/env bash + +# **create-nodes** + +# Creates baremetal poseur nodes for ironic testing purposes + +set -exu + +# Keep track of the devstack directory +TOP_DIR=$(cd $(dirname "$0")/.. && pwd) + +CPU=$1 +MEM=$(( 1024 * $2 )) +# extra G to allow fuzz for partition table : flavor size and registered size +# need to be different to actual size. +DISK=$(( $3 + 1)) + +case $4 in + i386) ARCH='i686' ;; + amd64) ARCH='x86_64' ;; + *) echo "Unsupported arch $4!" ; exit 1 ;; +esac + +TOTAL=$(($5 - 1)) +BRIDGE=$6 +EMULATOR=$7 + +LIBVIRT_NIC_DRIVER=${LIBVIRT_NIC_DRIVER:-"e1000"} +LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"} + +if ! virsh pool-list --all | grep -q $LIBVIRT_STORAGE_POOL; then + virsh pool-define-as --name $LIBVIRT_STORAGE_POOL dir --target /var/lib/libvirt/images >&2 + virsh pool-autostart $LIBVIRT_STORAGE_POOL >&2 + virsh pool-start $LIBVIRT_STORAGE_POOL >&2 +fi + +pool_state=$(virsh pool-info $LIBVIRT_STORAGE_POOL | grep State | awk '{ print $2 }') +if [ "$pool_state" != "running" ] ; then + [ ! 
-d /var/lib/libvirt/images ] && sudo mkdir /var/lib/libvirt/images + virsh pool-start $LIBVIRT_STORAGE_POOL >&2 +fi + +PREALLOC= +if [ -f /etc/debian_version ]; then + PREALLOC="--prealloc-metadata" +fi + +DOMS="" +for idx in $(seq 0 $TOTAL) ; do + NAME="baremetal${BRIDGE}_${idx}" + DOMS="$DOMS $NAME" + VOL_NAME="baremetal${BRIDGE}-${idx}.qcow2" + (virsh list --all | grep -q $NAME) && continue + + virsh vol-list --pool $LIBVIRT_STORAGE_POOL | grep -q $VOL_NAME && + virsh vol-delete $VOL_NAME --pool $LIBVIRT_STORAGE_POOL >&2 + virsh vol-create-as $LIBVIRT_STORAGE_POOL ${VOL_NAME} ${DISK}G --format qcow2 $PREALLOC >&2 + volume_path=$(virsh vol-path --pool $LIBVIRT_STORAGE_POOL $VOL_NAME) + # Pre-touch the VM to set +C, as it can only be set on empty files. + sudo touch "$volume_path" + sudo chattr +C "$volume_path" || true + $TOP_DIR/scripts/configure-vm --bootdev network --name $NAME --image "$volume_path" --arch $ARCH --cpus $CPU --memory $MEM --libvirt-nic-driver $LIBVIRT_NIC_DRIVER --emulator $EMULATOR --network $BRIDGE >&2 +done + +for dom in $DOMS ; do + # echo mac + virsh dumpxml $dom | grep "mac address" | head -1 | cut -d\' -f2 +done diff --git a/tools/ironic/scripts/setup-network b/tools/ironic/scripts/setup-network new file mode 100755 index 0000000000..8c3ea901b4 --- /dev/null +++ b/tools/ironic/scripts/setup-network @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +# **setup-network** + +# Setups openvswitch libvirt network suitable for +# running baremetal poseur nodes for ironic testing purposes + +set -exu + +# Keep track of the devstack directory +TOP_DIR=$(cd $(dirname "$0")/.. && pwd) +BRIDGE_SUFFIX=${1:-''} +BRIDGE_NAME=brbm$BRIDGE_SUFFIX + +# Only add bridge if missing +(sudo ovs-vsctl list-br | grep ${BRIDGE_NAME}$) || sudo ovs-vsctl add-br ${BRIDGE_NAME} + +# remove bridge before replacing it. 
+(virsh net-list | grep "${BRIDGE_NAME} ") && virsh net-destroy ${BRIDGE_NAME} +(virsh net-list --inactive | grep "${BRIDGE_NAME} ") && virsh net-undefine ${BRIDGE_NAME} + +virsh net-define <(sed s/brbm/$BRIDGE_NAME/ $TOP_DIR/templates/brbm.xml) +virsh net-autostart ${BRIDGE_NAME} +virsh net-start ${BRIDGE_NAME} diff --git a/tools/ironic/templates/brbm.xml b/tools/ironic/templates/brbm.xml new file mode 100644 index 0000000000..0769d3f1d0 --- /dev/null +++ b/tools/ironic/templates/brbm.xml @@ -0,0 +1,6 @@ + + brbm + + + + diff --git a/tools/ironic/templates/tftpd-xinetd.template b/tools/ironic/templates/tftpd-xinetd.template new file mode 100644 index 0000000000..7b9b0f8a78 --- /dev/null +++ b/tools/ironic/templates/tftpd-xinetd.template @@ -0,0 +1,11 @@ +service tftp +{ + protocol = udp + port = 69 + socket_type = dgram + wait = yes + user = root + server = /usr/sbin/in.tftpd + server_args = -v -v -v -v -v --map-file %TFTPBOOT_DIR%/map-file %TFTPBOOT_DIR% + disable = no +} diff --git a/tools/ironic/templates/vm.xml b/tools/ironic/templates/vm.xml new file mode 100644 index 0000000000..b18dec055f --- /dev/null +++ b/tools/ironic/templates/vm.xml @@ -0,0 +1,43 @@ + + %(name)s + %(memory)s + %(cpus)s + + hvm + + + + + + + + + + destroy + restart + restart + + %(emulator)s + + + + +
+ + +
+ + %(network)s + %(bm_network)s + + +